Commit bc4b922c authored by Baber

Merge branch 'main' into llama

# Conflicts:
#	lm_eval/tasks/llama3/README.md
parents 748eb47e b2c090cc
@@ -7,7 +7,7 @@ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
 ### Context: {doc["context"]}
 ### Question: {doc["question"]}
 ### Options:
-(1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']}
+(1) {doc["option#1"]}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc["option#4"]}\n(5) {doc["option#5"]}
 ### Answer: 주어진 문제의 정답은"""
 out_doc = {
@@ -258,7 +258,7 @@ def doc_to_text(src: str, tgt: str) -> str:
 src_name, tgt_name = map(code_to_language_name, [src, tgt])
 return f"""\
-{src_name} sentence: {jinja_var('sentence_' + src)}
+{src_name} sentence: {jinja_var("sentence_" + src)}
 {tgt_name} sentence:"""
# Global-MMLU
### Paper
Title: `Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation`
Abstract: [https://arxiv.org/abs/2412.03304](https://arxiv.org/abs/2412.03304)
Global-MMLU 🌍 is a multilingual evaluation set spanning 42 languages, including English. It combines machine translations of MMLU questions with professional translations and crowd-sourced post-edits. It also includes cultural sensitivity annotations for a subset of the questions (2850 questions per language), classifying them as Culturally Sensitive (CS) 🗽 or Culturally Agnostic (CA) ⚖️. These annotations were collected as part of an open science initiative led by Cohere For AI together with external contributors from both industry and academia.
Global-MMLU-Lite is a balanced collection of culturally sensitive and culturally agnostic MMLU tasks. It is designed for efficient evaluation of multilingual models in 15 languages (including English). Only languages with human translations and post-edits in the original [Global-MMLU](https://huggingface.co/datasets/CohereForAI/Global-MMLU) 🌍 dataset have been included in the lite version.
Homepage: \
[https://huggingface.co/datasets/CohereForAI/Global-MMLU](https://huggingface.co/datasets/CohereForAI/Global-MMLU) \
[https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite](https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite)
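
For a quick look at the data referenced above, the sketch below (not part of this commit) loads one language config of Global-MMLU-Lite with the Hugging Face `datasets` library; the `en` config, the `test` split, and the column names are assumptions taken from the task config later in this diff.

```python
# Illustrative sketch: inspect one language config of Global-MMLU-Lite.
# The "en" config and the test split are assumptions based on the task config.
from datasets import load_dataset

lite = load_dataset("CohereForAI/Global-MMLU-Lite", "en", split="test")
doc = lite[0]
print(doc["question"], doc["option_a"], doc["option_b"], doc["answer"])
```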
#### Groups
* `global_mmlu_{lang}`: This group uses the `Global-MMLU-Lite` benchmark, which supports 15 languages (including English).
* `global_mmlu_full_{lang}`: This group uses the `Global-MMLU` benchmark, which supports 42 languages; a usage sketch follows the subgroup list below.
#### Subgroups (supported only for the `full` version)
* `global_mmlu_full_stem`
* `global_mmlu_full_humanities`
* `global_mmlu_full_social_sciences`
* `global_mmlu_full_other`
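
A hedged sketch of how one of these groups might be run through the harness Python API (`lm_eval.simple_evaluate`); the model checkpoint below is a placeholder, not a recommendation.

```python
# Sketch only: run the global_mmlu_en task through the harness Python API.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
    tasks=["global_mmlu_en"],
    num_fewshot=5,
)
print(results["results"]["global_mmlu_en"])
```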
### Citation
```bibtex
@misc{singh2024globalmmluunderstandingaddressing,
title={Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation},
author={Shivalika Singh and Angelika Romanou and Clémentine Fourrier and David I. Adelani and Jian Gang Ngui and Daniel Vila-Suero and Peerat Limkonchotiwat and Kelly Marchisio and Wei Qi Leong and Yosephine Susanto and Raymond Ng and Shayne Longpre and Wei-Yin Ko and Madeline Smith and Antoine Bosselut and Alice Oh and Andre F. T. Martins and Leshem Choshen and Daphne Ippolito and Enzo Ferrante and Marzieh Fadaee and Beyza Ermis and Sara Hooker},
year={2024},
eprint={2412.03304},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2412.03304},
}
```
tag:
  - global_mmlu
dataset_path: CohereForAI/Global-MMLU-Lite
test_split: test
fewshot_split: dev
fewshot_config:
  sampler: default
output_type: multiple_choice
doc_to_text: "{{question.strip()}}\nA. {{option_a}}\nB. {{option_b}}\nC. {{option_c}}\nD. {{option_d}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.0
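
To make the prompt format concrete, here is an illustrative rendering (hypothetical sample values, not part of this commit) of what the `doc_to_text` template above produces for one document:

```python
# Illustrative only: mirror the doc_to_text Jinja template from the config above
# on a hypothetical document to show the resulting prompt string.
doc = {
    "question": "What is the capital of France?",
    "option_a": "Berlin",
    "option_b": "Paris",
    "option_c": "Madrid",
    "option_d": "Rome",
    "answer": "B",  # target letter, matched against doc_to_choice
}
prompt = (
    f"{doc['question'].strip()}\n"
    f"A. {doc['option_a']}\n"
    f"B. {doc['option_b']}\n"
    f"C. {doc['option_c']}\n"
    f"D. {doc['option_d']}\n"
    "Answer:"
)
print(prompt)
```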
import yaml

# Languages with human translations / post-edits in Global-MMLU-Lite.
languages = [
    "en",
    "ar",
    "fr",
    "es",
    "hi",
    "de",
    "id",
    "it",
    "ja",
    "ko",
    "pt",
    "zh",
    "yo",
    "bn",
    "sw",
]


def main() -> None:
    # Write one per-language task config that includes the shared _default_yaml.
    for language in languages:
        file_name = f"global_mmlu_{language}.yaml"
        try:
            with open(file_name, "w") as f:
                f.write("# Generated by _generate_configs.py\n")
                yaml.dump(
                    {
                        "include": "_default_yaml",
                        "task": f"global_mmlu_{language}",
                        "dataset_name": language,
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == "__main__":
    main()
# Generated by _generate_configs.py
dataset_name: ar
include: _default_yaml
task: global_mmlu_ar
# Generated by _generate_configs.py
dataset_name: bn
include: _default_yaml
task: global_mmlu_bn
# Generated by _generate_configs.py
dataset_name: de
include: _default_yaml
task: global_mmlu_de
# Generated by _generate_configs.py
dataset_name: en
include: _default_yaml
task: global_mmlu_en
# Generated by _generate_configs.py
dataset_name: es
include: _default_yaml
task: global_mmlu_es
# Generated by _generate_configs.py
dataset_name: fr
include: _default_yaml
task: global_mmlu_fr
# Generated by _generate_configs.py
dataset_name: hi
include: _default_yaml
task: global_mmlu_hi
# Generated by _generate_configs.py
dataset_name: id
include: _default_yaml
task: global_mmlu_id
# Generated by _generate_configs.py
dataset_name: it
include: _default_yaml
task: global_mmlu_it
# Generated by _generate_configs.py
dataset_name: ja
include: _default_yaml
task: global_mmlu_ja
# Generated by _generate_configs.py
dataset_name: ko
include: _default_yaml
task: global_mmlu_ko
# Generated by _generate_configs.py
dataset_name: pt
include: _default_yaml
task: global_mmlu_pt
# Generated by _generate_configs.py
dataset_name: sw
include: _default_yaml
task: global_mmlu_sw
# Generated by _generate_configs.py
dataset_name: yo
include: _default_yaml
task: global_mmlu_yo
# Generated by _generate_configs.py
dataset_name: zh
include: _default_yaml
task: global_mmlu_zh