Unverified Commit 69d56f45 authored by Yu Shi Jie's avatar Yu Shi Jie Committed by GitHub

Mmlu Pro (#1961)



* initialized mmlu_pro task

* added generative mmlu-pro

* added cot fewshot for mmlu-pro

* Initial commit

* updated mmlu-pro to take on 3 splits: test, val, dev

* mmlu-pro: added continuation and flan_cot_zeroshot

* added README.md for mmlu_pro

* removed

* update files

* moved files out, and removed unused versions

* updated

* mmlu_pro:

  - changed the task 'other' to 'miscellaneous': there is already a group named 'other', and a task and a group sharing the same alias (e.g. mmlu_pro_other_generative) throw an error

  - fixed yaml backslash escape for fewshot cot

* changed choices -> options in yaml config to fit dataset schema

* ONLY FOR DEFAULT: fixed yaml file to use variable number of choices

* mmlu-pro: fixed doc_to_text/choice/target configs for all variants

* mmlu-pro: minor fixes

* mmlu-pro/default: aligned with mmlu updates

* mmlu-pro: update yaml content in line with mmlu

* mmlu-pro: fixed mislabelling of task (math->chemistry)

* mmlu-pro: fixed yaml formatting

* add custom fewshot doc_to_text, target, and choice

* add process for each subtask

* add process for each subtask

* pre-commit

* pre-commit

* format

* resolved left out merge

* deleted folders + updated readme

* Update evaluator.py

* Update evaluator.py

---------
Co-authored-by: Yu Shi Jie <shijie@tensorplex.ai>
Co-authored-by: lintangsutawika <lintang@eleuther.ai>
Co-authored-by: root <root@455bdd73-01.cloud.together.ai>
Co-authored-by: Lintang Sutawika <lintang@sutawika.com>
parent c2168869
from functools import partial
import datasets
@@ -15,9 +17,38 @@ class ContextSampler:
self.target_delimiter = self.config.target_delimiter
self.fewshot_delimiter = self.config.fewshot_delimiter
self.doc_to_text = self.task.doc_to_text
self.doc_to_target = self.task.doc_to_target
self.doc_to_choice = self.task.doc_to_choice
if (
self.config.fewshot_config is not None
and self.config.fewshot_config.get("doc_to_text", None) is not None
):
self.doc_to_text = partial(
self.task.doc_to_text,
doc_to_text=self.config.fewshot_config.get("doc_to_text", None),
)
else:
self.doc_to_text = self.task.doc_to_text
if (
self.config.fewshot_config is not None
and self.config.fewshot_config.get("doc_to_target", None) is not None
):
self.doc_to_target = partial(
self.task.doc_to_target,
doc_to_target=self.config.fewshot_config.get("doc_to_target", None),
)
else:
self.doc_to_target = self.task.doc_to_target
if (
self.config.fewshot_config is not None
and self.config.fewshot_config.get("doc_to_choice", None) is not None
):
self.doc_to_choice = partial(
self.task.doc_to_choice,
doc_to_choice=self.config.fewshot_config.get("doc_to_choice", None),
)
else:
self.doc_to_choice = self.task.doc_to_choice
self.docs = docs # HF dataset split, provided by task._fewshot_docs()
if fewshot_indices: # subset few-shot docs from
@@ -52,14 +83,15 @@ class ContextSampler:
else self.doc_to_choice(doc)[doc_content]
)
labeled_examples += self.target_delimiter
labeled_examples += (
str(doc_target[0])
if isinstance(doc_target, list)
else str(doc_target)
if self.config.doc_to_choice is None or isinstance(doc_target, str)
else str(self.doc_to_choice(doc)[doc_target])
)
labeled_examples += self.fewshot_delimiter
if doc_target != "":
labeled_examples += (
str(doc_target[0])
if isinstance(doc_target, list)
else doc_target
if self.config.doc_to_choice is None or isinstance(doc_target, str)
else str(self.doc_to_choice(doc)[doc_target])
)
labeled_examples += self.fewshot_delimiter
return labeled_examples
......
@@ -1171,9 +1171,11 @@ class ConfigurableTask(Task):
"""
return doc
def doc_to_text(self, doc):
def doc_to_text(self, doc, doc_to_text=None):
if self.prompt is not None:
doc_to_text = self.prompt
elif doc_to_text is not None:
doc_to_text = doc_to_text
else:
doc_to_text = self.config.doc_to_text
@@ -1205,9 +1207,11 @@ class ConfigurableTask(Task):
print(type(doc_to_text))
raise TypeError
def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
def doc_to_target(self, doc: Mapping, doc_to_target=None) -> Union[int, str, list]:
if self.prompt is not None:
doc_to_target = self.prompt
elif doc_to_target is not None:
doc_to_target = doc_to_target
else:
doc_to_target = self.config.doc_to_target
@@ -1249,9 +1253,11 @@ class ConfigurableTask(Task):
else:
raise TypeError
def doc_to_choice(self, doc: Any) -> List[str]:
def doc_to_choice(self, doc: Any, doc_to_choice=None) -> List[str]:
if self.prompt is not None:
doc_to_choice = self.prompt
elif doc_to_choice is not None:
doc_to_choice = doc_to_choice
elif self.config.doc_to_choice is None:
eval_logger.error("doc_to_choice was called but not set in config")
else:
......
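The harness changes above follow one pattern: each of `doc_to_text`, `doc_to_target`, and `doc_to_choice` gains an optional override argument, and `ContextSampler` binds the callables declared under a task's `fewshot_config` with `functools.partial`, so few-shot examples can be rendered differently from the prompt being evaluated. A minimal standalone sketch of that pattern (function names and templates here are illustrative, not the harness API):

```python
# Sketch of the override pattern introduced above; not the harness code itself.
from functools import partial


def doc_to_text(doc, doc_to_text=None):
    # Use the task's default template unless a per-fewshot override is given.
    if doc_to_text is not None:
        return doc_to_text(doc)
    return "Q: {}\nA:".format(doc["question"])


def fewshot_to_text(doc):
    # Analogous to utils.fewshot_to_text in this PR: include the worked CoT answer.
    return "Question:\n{}\nAnswer: Let's think step by step. {}".format(
        doc["question"], doc.get("cot_content", "")
    )


# ContextSampler binds the fewshot_config callable once, mirroring
# partial(self.task.doc_to_text, doc_to_text=...) in the diff above.
fewshot_doc_to_text = partial(doc_to_text, doc_to_text=fewshot_to_text)

doc = {"question": "What is 2 + 2?", "cot_content": "2 + 2 = 4. The answer is (A)."}
print(doc_to_text(doc))          # default prompt rendering
print(fewshot_doc_to_text(doc))  # few-shot rendering with the CoT included
```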
# mmlu_pro
### Paper
Title: `MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark`
Abstract: `In the age of large-scale language models, benchmarks like the Massive Multitask Language Understanding (MMLU) have been pivotal in pushing the boundaries of what AI can achieve in language comprehension and reasoning across diverse domains. However, as models continue to improve, their performance on these benchmarks has begun to plateau, making it increasingly difficult to discern differences in model capabilities. This paper introduces MMLU-Pro, an enhanced dataset designed to extend the mostly knowledge-driven MMLU benchmark by integrating more challenging, reasoning-focused questions and expanding the choice set from four to ten options. Additionally, MMLU-Pro eliminates the trivial and noisy questions in MMLU. Our experimental results show that MMLU-Pro not only raises the challenge, causing a significant drop in accuracy by 16% to 33% compared to MMLU but also demonstrates greater stability under varying prompts. With 24 different prompt styles tested, the sensitivity of model scores to prompt variations decreased from 4-5% in MMLU to just 2% in MMLU-Pro. Additionally, we found that models utilizing Chain of Thought (CoT) reasoning achieved better performance on MMLU-Pro compared to direct answering, which is in stark contrast to the findings on the original MMLU, indicating that MMLU-Pro includes more complex reasoning questions. Our assessments confirm that MMLU-Pro is a more discriminative benchmark to better track progress in the field.`
Homepage: https://huggingface.co/datasets/TIGER-Lab/MMLU-Pro
### Citation
```bibtex
@misc{wang2024mmlupro,
title={MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark},
author={Yubo Wang and Xueguang Ma and Ge Zhang and Yuansheng Ni and Abhranil Chandra and Shiguang Guo and Weiming Ren and Aaran Arulraj and Xuan He and Ziyan Jiang and Tianle Li and Max Ku and Kai Wang and Alex Zhuang and Rongqi Fan and Xiang Yue and Wenhu Chen},
year={2024},
eprint={2406.01574},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
### Groups and Tasks
#### Groups
* `mmlu_pro`: 'All 14 subjects of the mmlu_pro dataset, evaluated following the methodology in mmlu's original implementation'
#### Tasks
The following tasks evaluate individual subjects in the mmlu_pro dataset (a short usage sketch follows the list):
- `mmlu_pro_biology`
- `mmlu_pro_business`
- `mmlu_pro_chemistry`
- `mmlu_pro_computer_science`
- `mmlu_pro_economics`
- `mmlu_pro_engineering`
- `mmlu_pro_health`
- `mmlu_pro_history`
- `mmlu_pro_law`
- `mmlu_pro_math`
- `mmlu_pro_other`
- `mmlu_pro_philosophy`
- `mmlu_pro_physics`
- `mmlu_pro_psychology`
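Either the group or any individual subject task can be passed to the harness. A hedged sketch using the library's `simple_evaluate` entry point (the model id and batch size below are illustrative placeholders, not part of this PR):

```python
# Hedged sketch: running the new group via the harness's Python entry point.
# The model id is just a small HF causal LM chosen for a smoke test.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["mmlu_pro"],
    batch_size=8,
)

# Per-task exact_match scores land under the "results" key of the returned dict.
for task_name, metrics in results["results"].items():
    print(task_name, metrics)
```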
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
dataset_path: TIGER-Lab/MMLU-Pro
test_split: test
fewshot_split: validation
fewshot_config:
sampler: first_n
doc_to_text: !function utils.fewshot_to_text
doc_to_target: ""
output_type: generate_until
doc_to_text: !function utils.doc_to_text
doc_to_target: answer
filter_list:
- name: "custom-extract"
filter:
- function: "regex"
regex_pattern: 'answer is \(?([ABCDEFGHIJ])\)?'
# alternative pattern: '.*[aA]nswer:\s*([A-J])'
- function: "take_first"
generation_kwargs:
until:
- "</s>"
- "Q:"
- "<|im_end|>"
do_sample: false
temperature: 0.0
num_fewshot: 5
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_case: true
ignore_punctuation: true
metadata:
version: 0.0
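The `custom-extract` filter above keeps only the regex capture group from the model's completion before `exact_match` is computed against the `answer` letter. A small illustrative check of the pattern on a made-up CoT completion:

```python
# Illustrative check of the custom-extract pattern configured above.
import re

pattern = re.compile(r"answer is \(?([ABCDEFGHIJ])\)?")

completion = (
    "Let's think step by step. Photosynthesis takes place in the chloroplast, "
    "so the answer is (B)."
)

match = pattern.search(completion)
print(match.group(1) if match else "<no match>")  # -> B
```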
description: "The following are multiple choice questions (with answers) about biology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_biology"
task_alias: "biology"
process_docs: !function utils.process_biology
description: "The following are multiple choice questions (with answers) about business. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_business"
task_alias: "business"
process_docs: !function utils.process_business
description: "The following are multiple choice questions (with answers) about chemistry. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_chemistry"
task_alias: "chemistry"
process_docs: !function utils.process_chemistry
description: "The following are multiple choice questions (with answers) about computer science. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_computer_science"
task_alias: "computer_science"
process_docs: !function utils.process_computer_science
description: "The following are multiple choice questions (with answers) about economics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_economics"
task_alias: "economics"
process_docs: !function utils.process_economics
description: "The following are multiple choice questions (with answers) about engineering. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_engineering"
task_alias: "engineering"
process_docs: !function utils.process_engineering
description: "The following are multiple choice questions (with answers) about health. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_health"
task_alias: "health"
process_docs: !function utils.process_health
description: "The following are multiple choice questions (with answers) about history. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_history"
task_alias: "history"
process_docs: !function utils.process_history
description: "The following are multiple choice questions (with answers) about law. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_law"
task_alias: "law"
process_docs: !function utils.process_law
description: "The following are multiple choice questions (with answers) about math. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_math"
task_alias: "math"
process_docs: !function utils.process_math
description: "The following are multiple choice questions (with answers) about other. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_other"
task_alias: "other"
process_docs: !function utils.process_other
description: "The following are multiple choice questions (with answers) about philosophy. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_philosophy"
task_alias: "philosophy"
process_docs: !function utils.process_philosophy
description: "The following are multiple choice questions (with answers) about physics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_physics"
task_alias: "physics"
process_docs: !function utils.process_physics
description: "The following are multiple choice questions (with answers) about psychology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
include: "_default_template_yaml"
task: "mmlu_pro_psychology"
task_alias: "psychology"
process_docs: !function utils.process_psychology
from functools import partial
choices = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
]
def format_cot_example(example, including_answer=True):
prompt = "Question:\n"
question = example["question"]
options = example["options"]
prompt += question + "\n"
prompt += "Options:\n"
for i, opt in enumerate(options):
prompt += "{}. {}\n".format(choices[i], opt)
if including_answer:
cot_content = example["cot_content"].replace(
"A: Let's think step by step.", "Answer: Let's think step by step."
)
prompt += cot_content + "\n\n"
else:
prompt += "Answer: Let's think step by step."
return prompt
doc_to_text = partial(format_cot_example, including_answer=False)
fewshot_to_text = partial(format_cot_example, including_answer=True)
def process_docs(dataset, subject):
return dataset.filter(lambda x: x["category"] == subject)
process_biology = partial(process_docs, subject="biology")
process_business = partial(process_docs, subject="business")
process_chemistry = partial(process_docs, subject="chemistry")
process_computer_science = partial(process_docs, subject="computer_science")
process_economics = partial(process_docs, subject="economics")
process_engineering = partial(process_docs, subject="engineering")
process_health = partial(process_docs, subject="health")
process_history = partial(process_docs, subject="history")
process_law = partial(process_docs, subject="law")
process_math = partial(process_docs, subject="math")
process_other = partial(process_docs, subject="other")
process_philosophy = partial(process_docs, subject="philosophy")
process_physics = partial(process_docs, subject="physics")
process_psychology = partial(process_docs, subject="psychology")
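For reference, a short usage sketch of the helpers above on a hand-written record shaped like a TIGER-Lab/MMLU-Pro row (field values are made up for illustration):

```python
# Made-up record in the shape of an MMLU-Pro row, for illustration only.
example = {
    "category": "biology",
    "question": "Which organelle carries out photosynthesis?",
    "options": ["Mitochondrion", "Chloroplast", "Ribosome", "Nucleus"],
    "cot_content": "A: Let's think step by step. Photosynthesis happens in the "
    "chloroplast. The answer is (B).",
    "answer": "B",
}

# Few-shot rendering includes the worked chain-of-thought answer...
print(fewshot_to_text(example))
# ...while the evaluated prompt ends at the CoT cue for the model to continue.
print(doc_to_text(example))
```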