Commit 4eecbabb authored by Baber's avatar Baber
Browse files

Merge branch 'main' into prefill

parents dac8b534 fb963f0f
# ACVA entertainment subset, evaluated as a two-choice (true/false) task.
task: arabic_leaderboard_acva_entertainment
dataset_path: OALL/ACVA  # source dataset on the Hugging Face Hub
dataset_name: entertainment
output_type: multiple_choice
training_split: null  # no training split is used
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"  # per-doc list produced by process_docs
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm  # length-normalized accuracy
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    """Map raw ACVA docs to the query/choices/gold fields used by the task YAML.

    Each output doc gains:
      - "query":   Arabic prompt "السؤال: <question>\\nالإجابة:"
      - "choices": the two Arabic true/false labels
      - "gold":    index of the gold answer within "choices"
    """

    def _process_doc(doc):
        # ACVA statements are judged true ("صح") or false ("خطأ").
        choices = ["صح", "خطأ"]
        question = doc["question"]
        return {
            "query": f"السؤال: {question}\nالإجابة:",
            "choices": choices,
            "gold": choices.index(doc["answer"]),
        }

    return dataset.map(_process_doc)
# Top-level group for the complete Arabic leaderboard; aggregates all
# benchmark subgroups with size-weighted mean scores.
group: arabic_leaderboard_complete
task:
  - arabic_leaderboard_acva
  - arabic_leaderboard_alghafa
  - arabic_leaderboard_arabic_exams
  - arabic_leaderboard_arabic_mt_arc_challenge
  - arabic_leaderboard_arabic_mt_arc_easy
  - arabic_leaderboard_arabic_mt_boolq
  - arabic_leaderboard_arabic_mt_hellaswag
  - arabic_leaderboard_arabic_mt_mmlu
  - arabic_leaderboard_arabic_mt_copa
  - arabic_leaderboard_arabic_mt_openbook_qa
  - arabic_leaderboard_arabic_mt_piqa
  - arabic_leaderboard_arabic_mt_race
  - arabic_leaderboard_arabic_mt_sciq
  - arabic_leaderboard_arabic_mt_toxigen
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true  # weight each subtask by its number of samples
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
# Arabic Leaderboard Light
Title: Open Arabic LLM Leaderboard Light
This leaderboard follows all the details as in [`arabic_leaderboard_complete`](../arabic_leaderboard_complete), except that a light version - 10% random sample of the test set of each benchmark - is used to test the language models.
NOTE: The ACVA benchmark includes a Yemen subset that is very small — it has only 10 samples in its test split. For this specific subset, to obtain more reliable results, we use the original test set instead of a 10% sample.
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
# Group aggregating the AlGhafa light (10% sample) subtasks with
# size-weighted mean scores.
group: arabic_leaderboard_alghafa_light
task:
  - arabic_leaderboard_alghafa_mcq_exams_test_ar_light
  - arabic_leaderboard_alghafa_meta_ar_dialects_light
  - arabic_leaderboard_alghafa_meta_ar_msa_light
  - arabic_leaderboard_alghafa_multiple_choice_facts_truefalse_balanced_task_light
  - arabic_leaderboard_alghafa_multiple_choice_grounded_statement_soqal_task_light
  - arabic_leaderboard_alghafa_multiple_choice_grounded_statement_xglue_mlqa_task_light
  - arabic_leaderboard_alghafa_multiple_choice_rating_sentiment_no_neutral_task_light
  - arabic_leaderboard_alghafa_multiple_choice_rating_sentiment_task_light
  - arabic_leaderboard_alghafa_multiple_choice_sentiment_task_light
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true  # weight each subtask by its number of samples
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: MCQ exams subset.
task: arabic_leaderboard_alghafa_mcq_exams_test_ar_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: mcq_exams_test_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: Arabic dialects subset.
task: arabic_leaderboard_alghafa_meta_ar_dialects_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: meta_ar_dialects
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: Modern Standard Arabic subset.
task: arabic_leaderboard_alghafa_meta_ar_msa_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: meta_ar_msa
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: balanced true/false facts subset.
task: arabic_leaderboard_alghafa_multiple_choice_facts_truefalse_balanced_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_facts_truefalse_balanced_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: SOQAL grounded-statement subset.
task: arabic_leaderboard_alghafa_multiple_choice_grounded_statement_soqal_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_grounded_statement_soqal_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: XGLUE-MLQA grounded-statement subset.
task: arabic_leaderboard_alghafa_multiple_choice_grounded_statement_xglue_mlqa_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_grounded_statement_xglue_mlqa_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: rating sentiment (no neutral) subset.
task: arabic_leaderboard_alghafa_multiple_choice_rating_sentiment_no_neutral_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_rating_sentiment_no_neutral_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: rating sentiment subset.
task: arabic_leaderboard_alghafa_multiple_choice_rating_sentiment_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_rating_sentiment_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# AlGhafa light (10% sample) multiple-choice task: sentiment subset.
task: arabic_leaderboard_alghafa_multiple_choice_sentiment_task_light
dataset_path: arcee-globe/AlGhafa-Arabic-LLM-Benchmark-Native-10percent
dataset_name: multiple_choice_sentiment_task
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    """Map raw AlGhafa docs to the query/choices/gold fields used by the task YAML.

    The answer options are discovered dynamically: every column except
    "query", "label", and "__few_shots" is treated as one choice, and
    "label" holds the integer index of the gold choice.
    """

    def _process_doc(doc):
        question = doc["query"]
        answer_index = int(doc["label"])
        # Dynamically determine the choices by excluding '__few_shots', 'query'
        # and 'label' bookkeeping columns.
        choices_keys = [
            key for key in doc.keys() if key not in ["query", "label", "__few_shots"]
        ]
        choices = [doc[key] for key in choices_keys]
        # Arabic instruction: "The following are multiple-choice questions
        # with the correct answer."
        instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        query = f"{instruction}السؤال: {question}\n"
        for index, choice in enumerate(choices):
            query += f"{index}) {choice}\n"
        query += "الإجابة:"
        return {"query": query, "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
# Arabic EXAMS light (10% sample) multiple-choice task.
task: arabic_exams_light
dataset_path: arcee-globe/Arabic_EXAMS-10percent
dataset_name: default
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# Group wrapping the single Arabic EXAMS light task so it aggregates like
# the other leaderboard subgroups.
group: arabic_leaderboard_arabic_exams_light
task:
  - arabic_exams_light
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true  # weight each subtask by its number of samples
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
import datasets
import numpy as np

# fmt: off
LETTER_INDICES_AR = ["أ", "ب", "ج", "د", "هـ", "و", "ز", "ح", "ط", "ي", "ك", "ل", "م", "ن", "س", "ع", "ف", "ص", "ق", "ر", "ش", "ت", "ث", "خ", "ذ", "ض", "ظ", "غ"]
# fmt: on
# fmt: off
LETTER_INDICES = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
# fmt: on


def process_docs(dataset: datasets.Dataset):
    """Map raw Arabic EXAMS docs to the query/choices/gold fields used by the task YAML.

    Choices are presented with Arabic letter labels while the gold answer is
    resolved from the dataset's Latin answer letter (A-D).
    """

    def _process_doc(doc):
        topic = doc["subject"]
        question = doc["question"]
        choices = [doc["A"], doc["B"], doc["C"], doc["D"]]
        # Each formatted choice already ends with "\n"; they are additionally
        # joined with "\n" below, reproducing the reference prompt layout.
        choices_formatted = [
            f" {LETTER_INDICES_AR[i]}) {choice}\n" for i, choice in enumerate(choices)
        ]
        answer = doc["answer"]
        # Gold index comes from the Latin answer letter (A-D).
        answer_index = LETTER_INDICES.index(answer)
        instruction = f"الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح حول {topic.replace('_', ' ')}. \n\n"
        query = f"{instruction}السؤال: {question}\n"
        query += "\n".join(choices_formatted)
        query += "\nالإجابة:"
        # The model chooses among the Arabic letter labels, not the full texts.
        return {"query": query, "choices": LETTER_INDICES_AR[:4], "gold": answer_index}

    return dataset.map(_process_doc)
# Arabic MMLU light (10% sample) multiple-choice task: abstract algebra subset.
task: arabic_leaderboard_arabic_mmlu_abstract_algebra_light
dataset_path: arcee-globe/Arabic_MMLU-10percent
dataset_name: abstract_algebra
output_type: multiple_choice
training_split: null
validation_split: dev  # MMLU uses "dev" rather than "validation"
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: dev
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
# Arabic MMLU light (10% sample) multiple-choice task: anatomy subset.
task: arabic_leaderboard_arabic_mmlu_anatomy_light
dataset_path: arcee-globe/Arabic_MMLU-10percent
dataset_name: anatomy
output_type: multiple_choice
training_split: null
validation_split: dev  # MMLU uses "dev" rather than "validation"
test_split: test
# utils.process_docs builds the query/choices/gold fields referenced below.
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: dev
fewshot_config:
  sampler: first_n  # deterministic few-shot selection
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment