Commit abd17276 authored by Baber

Merge branch 'smolrefact' into tasklist

# Conflicts:
#	lm_eval/__main__.py
#	lm_eval/api/group.py
#	lm_eval/api/task.py
#	lm_eval/evaluator_utils.py
#	lm_eval/tasks/__init__.py
#	lm_eval/utils.py
#	pyproject.toml
parents 00afd536 70314843
"dataset_name": "virology"
"description":
"The following are multiple choice questions (with answers) about virology.\n\
\n"
"tag": "mmlu_other_generative_spanish"
"include": "_default_template_spanish_yaml"
"task": "mmlu_virology_generative_spanish"
"task_alias": "virology_spanish"
"dataset_name": "world_religions"
"description":
"The following are multiple choice questions (with answers) about world\
\ religions.\n\n"
"tag": "mmlu_humanities_generative_spanish"
"include": "_default_template_spanish_yaml"
"task": "mmlu_world_religions_generative_spanish"
"task_alias": "world_religions_spanish"
task: "mmlu_redux_spanish"
dataset_path: amias-mx/mmlu-redux-2.0-spanish
dataset_name: abstract_algebra
test_split: test
output_type: multiple_choice
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
dataset_kwargs:
  trust_remote_code: true
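For reference, here is a minimal sketch of how the `doc_to_text` template above turns a document into a prompt. It uses Jinja2 directly (the engine lm-eval uses for these templates); the sample question is invented for illustration.
```python
# Minimal sketch: render the doc_to_text template above with Jinja2.
# The sample doc below is invented; real docs come from the HF dataset.
from jinja2 import Template

doc_to_text = (
    "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}"
    "\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
)
doc = {
    "question": "¿Cuál es el elemento identidad del grupo (Z, +)?",
    "choices": ["0", "1", "-1", "No existe"],
    "answer": 0,  # index; with doc_to_choice ["A", "B", "C", "D"], the target is "A"
}
print(Template(doc_to_text).render(**doc))
```
With `output_type: multiple_choice`, the harness scores each of the four letter continuations by log-likelihood rather than generating free text.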
# MMLU-Redux (Generative)
### Paper
Title: `Are We Done with MMLU?`
Abstract: `https://arxiv.org/pdf/2406.04127`
`The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.`
Homepage: `https://huggingface.co/datasets/edinburgh-dawg/mmlu-redux-2.0`
### Citation
```bibtex
@misc{edinburgh2024mmlu,
      title={Are We Done with MMLU?},
      author={Aryo Pradipta Gema and Joshua Ong Jun Leang and Giwon Hong and Alessio Devoto and
              Alberto Carlo Maria Mancino and Rohit Saxena and Xuanli He and Yu Zhao and Xiaotang Du and
              Mohammad Reza Ghasemi Madani and Claire Barale and Robert McHardy and Joshua Harris and
              Jean Kaddour and Emile van Krieken and Pasquale Minervini},
      year={2025},
      eprint={2406.04127},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
### Groups, Tags, and Tasks
#### Groups
- `mmlu_redux_generative`
- `stem`
- `other`
- `social sciences`
- `humanities`
#### Tasks
- `mmlu_stem_generative`
- `mmlu_other_generative`
- `mmlu_social_sciences_generative`
- `mmlu_humanities_generative`
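A minimal sketch of running one of these through the harness's Python API (the model checkpoint is a placeholder; `lm_eval.simple_evaluate` is assumed to be available, as in recent harness releases):
```python
# Minimal sketch: evaluate a placeholder HF model on one of the generative tags.
# Assumes a recent lm-evaluation-harness that exposes lm_eval.simple_evaluate.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                                      # Hugging Face backend
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
    tasks=["mmlu_stem_generative"],
    num_fewshot=0,
)
print(results["results"])  # per-subject exact_match scores
```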
### Checklist
For adding novel benchmarks/datasets to the library:
- [x] Is the task an existing benchmark in the literature?
- [x] Have you referenced the original paper that introduced the task?
- [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
- [ ] Is the "Main" variant of this task clearly denoted?
- [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
- [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
### Changelog
- ver 1 (PR #2705): first implementation
dataset_path: "edinburgh-dawg/mmlu-redux-2.0"
test_split: test
dataset_kwargs:
  trust_remote_code: true
output_type: generate_until
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nPlease respond with the correct letter (A, B, C or D) without any additional comments, only the correct letter:"
doc_to_target: "{{['A','B','C','D'][answer]}}"
target_delimiter: ":"
generation_kwargs:
  until:
    - "</s>"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
# IMPORTANT: name the filter "default" so older versions of the harness apply it automatically.
filter_list:
  - name: default
    filter:
      # Captures the first single capital letter A/B/C/D in the generation.
      - function: regex
        regex_pattern: "([ABCD])"
      - function: take_first
metadata:
  version: 3.0
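To make the filter chain above concrete, here is a hedged, standalone re-implementation of what `regex` followed by `take_first` does to a model generation (lm-eval has its own filter classes; this only mimics the behavior):
```python
# Standalone sketch of the regex -> take_first extraction used above.
# Not the harness's filter classes; it only mimics their behavior.
import re

pattern = re.compile(r"([ABCD])")

def extract_choice(generation: str, fallback: str = "[invalid]") -> str:
    """Return the first capital A/B/C/D found in the generation, else a fallback."""
    matches = pattern.findall(generation)
    return matches[0] if matches else fallback

print(extract_choice("The correct answer is B, because ..."))  # -> "B"
print(extract_choice("b) ..."))  # -> "[invalid]"; only capital letters match the pattern
```
The extracted letter is then compared against `doc_to_target` by `exact_match`, with `ignore_case` and `ignore_punctuation` enabled.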
group: mmlu_redux_generative
group_alias: mmlu_redux (generative)
task:
  - group: stem
    task:
      - mmlu_stem_generative
    aggregate_metric_list:
      - metric: exact_match
        weight_by_size: true
  - group: other
    task:
      - mmlu_other_generative
    aggregate_metric_list:
      - metric: exact_match
        weight_by_size: true
  - group: social sciences
    task:
      - mmlu_social_sciences_generative
    aggregate_metric_list:
      - metric: exact_match
        weight_by_size: true
  - group: humanities
    task:
      - mmlu_humanities_generative
    aggregate_metric_list:
      - metric: exact_match
        weight_by_size: true
aggregate_metric_list:
  - aggregation: mean
    metric: exact_match
    weight_by_size: true
metadata:
  version: 3
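As a rough illustration of what `weight_by_size: true` means for the top-level aggregate, here is a sketch of a size-weighted mean over the four sub-groups; the scores and document counts are invented.
```python
# Sketch of a size-weighted mean over sub-group exact_match scores.
# The sub-group names match the config above; scores and sizes are invented.
subtasks = {
    # name: (exact_match, num_docs)
    "mmlu_stem_generative": (0.41, 3000),
    "mmlu_other_generative": (0.52, 2900),
    "mmlu_social_sciences_generative": (0.55, 3100),
    "mmlu_humanities_generative": (0.48, 4700),
}

total_docs = sum(n for _, n in subtasks.values())
weighted = sum(score * n for score, n in subtasks.values()) / total_docs
print(f"mmlu_redux_generative exact_match ~ {weighted:.4f}")
```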
"dataset_name": "abstract_algebra"
"description": "The following are multiple choice questions (with answers) about abstract\
\ algebra.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_abstract_algebra_generative"
"task_alias": "abstract_algebra"
"dataset_name": "anatomy"
"description": "The following are multiple choice questions (with answers) about anatomy.\n\
\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_anatomy_generative"
"task_alias": "anatomy"
"dataset_name": "astronomy"
"description": "The following are multiple choice questions (with answers) about astronomy.\n\
\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_astronomy_generative"
"task_alias": "astronomy"
"dataset_name": "business_ethics"
"description": "The following are multiple choice questions (with answers) about business\
\ ethics.\n\n"
"tag": "mmlu_other_generative"
"include": "_default_template_yaml"
"task": "mmlu_business_ethics_generative"
"task_alias": "business_ethics"
"dataset_name": "clinical_knowledge"
"description": "The following are multiple choice questions (with answers) about clinical\
\ knowledge.\n\n"
"tag": "mmlu_other_generative"
"include": "_default_template_yaml"
"task": "mmlu_clinical_knowledge_generative"
"task_alias": "clinical_knowledge"
"dataset_name": "college_biology"
"description": "The following are multiple choice questions (with answers) about college\
\ biology.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_biology_generative"
"task_alias": "college_biology"
"dataset_name": "college_chemistry"
"description": "The following are multiple choice questions (with answers) about college\
\ chemistry.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_chemistry_generative"
"task_alias": "college_chemistry"
"dataset_name": "college_computer_science"
"description": "The following are multiple choice questions (with answers) about college\
\ computer science.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_computer_science_generative"
"task_alias": "college_computer_science"
"dataset_name": "college_mathematics"
"description": "The following are multiple choice questions (with answers) about college\
\ mathematics.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_mathematics_generative"
"task_alias": "college_mathematics"
"dataset_name": "college_medicine"
"description": "The following are multiple choice questions (with answers) about college\
\ medicine.\n\n"
"tag": "mmlu_other_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_medicine_generative"
"task_alias": "college_medicine"
"dataset_name": "college_physics"
"description": "The following are multiple choice questions (with answers) about college\
\ physics.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_college_physics_generative"
"task_alias": "college_physics"
"dataset_name": "computer_security"
"description": "The following are multiple choice questions (with answers) about computer\
\ security.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_computer_security_generative"
"task_alias": "computer_security"
"dataset_name": "conceptual_physics"
"description": "The following are multiple choice questions (with answers) about conceptual\
\ physics.\n\n"
"tag": "mmlu_stem_generative"
"include": "_default_template_yaml"
"task": "mmlu_conceptual_physics_generative"
"task_alias": "conceptual_physics"
"dataset_name": "econometrics"
"description": "The following are multiple choice questions (with answers) about econometrics.\n\
\n"
"tag": "mmlu_social_sciences_generative"
"include": "_default_template_yaml"
"task": "mmlu_econometrics_generative"
"task_alias": "econometrics"