Commit 8fada609 authored by Baber

Merge branch 'main' into mathvista

parents 0007b74a 1208afd3
"dataset_name": "Prof Law"
"tag": "arabicmmlu_humanities_tasks"
"include": "_default_arabicmmlu_template_yaml"
"task": "arabicmmlu_prof_law"
"task_alias": "Prof Law"
"dataset_name": "Social Science (Middle School)"
"include": "_default_arabicmmlu_template_yaml"
"tag": "arabicmmlu_social_science_tasks"
"task": "arabicmmlu_social_science_middle_school"
"task_alias": "Social Science (Middle School)"
"dataset_name": "Social Science (Primary School)"
"include": "_default_arabicmmlu_template_yaml"
"tag": "arabicmmlu_social_science_tasks"
"task": "arabicmmlu_social_science_primary_school"
"task_alias": "Social Science (Primary School)"
"dataset_name": "Univ Accounting"
"tag": "arabicmmlu_social_science_tasks"
"include": "_default_arabicmmlu_template_yaml"
"task": "arabicmmlu_univ_accounting"
"task_alias": "Univ Accounting"
"dataset_name": "Univ Computer Science"
"tag": "arabicmmlu_stem_tasks"
"include": "_default_arabicmmlu_template_yaml"
"task": "arabicmmlu_univ_computer_science"
"task_alias": "Univ Computer Science"
"dataset_name": "Univ Economics"
"tag": "arabicmmlu_social_science_tasks"
"include": "_default_arabicmmlu_template_yaml"
"task": "arabicmmlu_univ_economics"
"task_alias": "Univ Economics"
"dataset_name": "Univ Political Science"
"tag": "arabicmmlu_social_science_tasks"
"include": "_default_arabicmmlu_template_yaml"
"task": "arabicmmlu_univ_political_science"
"task_alias": "Univ Political Science"
@@ -23,7 +23,7 @@ def doc_to_text(doc):
     question = (
         doc["Question"]
-        if doc["Context"] == ""
+        if not doc["Context"]
        else f"{doc['Context']}\n\n{doc['Question']}"
     )
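The rewritten condition is equivalent for empty strings but additionally covers `None`, since both are falsy in Python. A quick illustration:

```python
for context in ("", None, "Some passage"):
    # `not context` is True for both "" and None, so the template falls back
    # to the bare question in either case; the old `== ""` check missed None.
    print(repr(context), not context)
```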
@@ -41,4 +41,4 @@ def doc_to_text(doc):
 def doc_to_choice(doc):
-    return [alpa[i][0] for i in range(5) if doc[f"Option {i+1}"]]
+    return [alpa[i][0] for i in range(5) if doc[f"Option {i + 1}"]]
@@ -9,7 +9,7 @@ validation_split: validation
 test_split: test
 fewshot_split: train
 doc_to_text: 'Given the following question and four candidate answers (A, B, C and D), choose the best answer.\nQuestion: {{question.strip()}}\nA. {{choices.text[0]}}\nB. {{choices.text[1]}}\nC. {{choices.text[2]}}{% if choices.text|length > 3 %}\nD. {{choices.text[3]}}{% endif %}\nYour response should end with "The best answer is [the_answer_letter]" where the [the_answer_letter] is one of A, B, C or D.'
-assistant_prefill: 'The best answer is'
+gen_prefix: 'The best answer is'
 fewshot_delimiter: "\n\n"
 doc_to_target: "{{ 'ABCD'[answerKey|int - 1] if answerKey|string in '1234' else answerKey }}"
 num_fewshot: 0
......
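The `doc_to_target` template maps numeric answer keys onto letters while passing letter keys through unchanged. In plain Python the same logic would read (a sketch for clarity, not harness code):

```python
def answer_letter(answer_key) -> str:
    # Numeric keys "1"-"4" become "A"-"D"; anything else (already a letter)
    # is returned as-is, mirroring the Jinja expression in doc_to_target.
    key = str(answer_key)
    return "ABCD"[int(key) - 1] if key in "1234" else key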
@@ -258,7 +258,7 @@ def doc_to_text(src: str, tgt: str) -> str:
     src_name, tgt_name = map(code_to_language_name, [src, tgt])
     return f"""\
-{src_name} sentence: {jinja_var('sentence_' + src)}
+{src_name} sentence: {jinja_var("sentence_" + src)}
 {tgt_name} sentence:"""
......
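`jinja_var` itself is not shown in the hunk; from its use it evidently wraps a variable name in Jinja2 delimiters so the rendered prompt contains a placeholder rather than a literal value. A minimal sketch of such a helper (an assumption, not the repository's actual definition):

```python
def jinja_var(name: str) -> str:
    # Hypothetical helper: jinja_var("sentence_" + "en") -> "{{sentence_en}}"
    return "{{" + name + "}}"
```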
@@ -30,6 +30,12 @@ Homepage: https://github.com/google/BIG-bench
 * `group_name`: `Short description`
 
+#### Tags
+
+* `bigbench_generate_until`
+* `bigbench_multiple_choice_a`
+* `bigbench_multiple_choice_b`
+
 #### Tasks
 
 * `task_name`: `1-sentence description of what this particular task does`
......
-group: bigbench_generate_until
+tag: bigbench_generate_until
 dataset_path: hails/bigbench
 output_type: generate_until
 dataset_kwargs:
......
-group: bigbench_multiple_choice
+tag: bigbench_multiple_choice_a
 dataset_path: hails/bigbench
 dataset_kwargs:
 # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods
......
-group: bigbench_multiple_choice
+tag: bigbench_multiple_choice_b
 dataset_path: hails/bigbench
 dataset_kwargs:
 # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods
......
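The rename from `group:` to `tag:` matters for reporting: a tag merely selects a set of tasks, whereas a group also aggregates their scores. Selecting the tagged tasks still works the same way, for example via the Python API (the model and model_args below are illustrative):

```python
from lm_eval import simple_evaluate

# Runs every task carrying the `bigbench_multiple_choice_a` tag; unlike a
# group, the tag itself contributes no aggregated score to the results.
results = simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["bigbench_multiple_choice_a"],
)
```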
@@ -259,7 +259,7 @@ def doc_to_text(src: str, tgt: str) -> str:
     src_name, tgt_name = map(code_to_language_name, [src, tgt])
     return f"""\
-{src_name} sentence: {jinja_var('sentence_' + src)}
+{src_name} sentence: {jinja_var("sentence_" + src)}
 {tgt_name} sentence:"""
......
@@ -7,7 +7,7 @@ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
 ### Context: {doc["context"]}
 ### Question: {doc["question"]}
 ### Options:
-(1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']}
+(1) {doc["option#1"]}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc["option#4"]}\n(5) {doc["option#5"]}
 ### Answer: 주어진 문제의 정답은"""
 out_doc = {

(The Korean tail of the prompt, "주어진 문제의 정답은", means "the answer to the given question is"; it is part of the prompt string and stays untranslated in the code.)
......
@@ -258,7 +258,7 @@ def doc_to_text(src: str, tgt: str) -> str:
     src_name, tgt_name = map(code_to_language_name, [src, tgt])
     return f"""\
-{src_name} sentence: {jinja_var('sentence_' + src)}
+{src_name} sentence: {jinja_var("sentence_" + src)}
 {tgt_name} sentence:"""
......
@@ -6,9 +6,26 @@ Title: `Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation`
 
 Abstract: [https://arxiv.org/abs/2412.03304](https://arxiv.org/abs/2412.03304)
 
 Global-MMLU 🌍 is a multilingual evaluation set spanning 42 languages, including English. The dataset combines machine translations of MMLU questions with professional translations and crowd-sourced post-edits. It also includes cultural-sensitivity annotations for a subset of the questions (2,850 per language), classifying them as Culturally Sensitive (CS) 🗽 or Culturally Agnostic (CA) ⚖️. These annotations were collected as part of an open-science initiative led by Cohere For AI in collaboration with many external collaborators from both industry and academia.
 
 Global-MMLU-Lite is a balanced collection of culturally sensitive and culturally agnostic MMLU tasks, designed for efficient evaluation of multilingual models in 15 languages (including English). Only languages with human translations and post-edits in the original [Global-MMLU](https://huggingface.co/datasets/CohereForAI/Global-MMLU) 🌍 dataset are included in the lite version.
 
-Homepage: [https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite](https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite)
+Homepage: \
+[https://huggingface.co/datasets/CohereForAI/Global-MMLU](https://huggingface.co/datasets/CohereForAI/Global-MMLU) \
+[https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite](https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite)
+
+#### Groups
+
+* `global_mmlu_{lang}`: uses the `Global-MMLU-Lite` benchmark, which supports 15 languages.
+* `global_mmlu_full_{lang}`: uses the full `Global-MMLU` benchmark, which supports 42 languages.
+
+#### Subgroups (supported only for the `full` version)
+
+* `global_mmlu_full_stem`
+* `global_mmlu_full_humanities`
+* `global_mmlu_full_social_sciences`
+* `global_mmlu_full_other`
+
 ### Citation
......
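For a quick sanity check of what these tasks consume, the underlying dataset can be inspected directly; the dataset path, config name, and split below come from the task config that follows:

```python
from datasets import load_dataset

# Peek at the Arabic portion of Global-MMLU-Lite ("ar" is the dataset_name,
# "test" the test_split in the task YAML below).
ds = load_dataset("CohereForAI/Global-MMLU-Lite", "ar", split="test")
print(ds[0])  # one multiple-choice record
```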
import yaml

# Languages with human translations and post-edits in Global-MMLU-Lite.
languages = [
    "en",
    "ar",
    "fr",
    "es",
    "hi",
    "de",
    "id",
    "it",
    "ja",
    "ko",
    "pt",
    "zh",
    "yo",
    "bn",
    "sw",
]


def main() -> None:
    for language in languages:
        file_name = f"global_mmlu_{language}.yaml"
        try:
            # Mode "x" refuses to overwrite an existing config, which is what
            # makes the FileExistsError handler below meaningful ("w" would
            # silently clobber hand-edited files and never raise it).
            with open(file_name, "x") as f:
                f.write("# Generated by _generate_configs.py\n")
                yaml.dump(
                    {
                        "include": "_default_yaml",
                        "task": f"global_mmlu_{language}",
                        "dataset_name": language,
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == "__main__":
    main()
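Running the script emits one small config per language. Because `yaml.dump` sorts keys alphabetically by default, the body written for, say, `en` can be reproduced with:

```python
import yaml

# Matches what main() writes for "en" after the "# Generated by" header line.
print(
    yaml.dump(
        {
            "include": "_default_yaml",
            "task": "global_mmlu_en",
            "dataset_name": "en",
        }
    )
)
# dataset_name: en
# include: _default_yaml
# task: global_mmlu_en
```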
tag:
- global_mmlu
dataset_path: CohereForAI/Global-MMLU-Lite
dataset_name: ar
test_split: test
fewshot_split: dev
fewshot_config:
......