Commit 173b2bc3 authored by Baber's avatar Baber
Browse files

Merge branch 'main' into humaneval

# Conflicts:
#	lm_eval/api/task.py
parents 74344829 bb098f13
group: arabic_leaderboard_arabic_mt_mmlu
task:
- arabic_mt_mmlu
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_mmlu
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: mmlu_okapi_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import datasets
import numpy as np
def process_docs(dataset: "datasets.Dataset"):
    """Convert raw translated-MMLU rows into multiple-choice eval docs.

    Every column other than ``query``/``label``/``__few_shots`` is treated
    as an answer option; the Arabic prompt lists them as ``0) ...`` lines.
    Returns the dataset with ``query``/``choices``/``gold`` fields added.
    """
    _RESERVED = ("query", "label", "__few_shots")

    def _to_mc_doc(doc):
        gold = int(doc["label"])
        # Option columns are discovered dynamically: anything not reserved.
        options = [doc[k] for k in doc.keys() if k not in _RESERVED]
        header = (
            "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
            f"السؤال: {doc['query']}\n"
        )
        listing = "".join(f"{i}) {opt}\n" for i, opt in enumerate(options))
        return {
            "query": header + listing + "الإجابة:",
            "choices": options,
            "gold": gold,
        }

    return dataset.map(_to_mc_doc)
group: arabic_leaderboard_arabic_mt_openbook_qa
task:
- arabic_mt_openbook_qa
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_openbook_qa
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: openbook_qa_ext_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import datasets
import numpy as np
def process_docs(dataset: "datasets.Dataset"):
    """Turn translated OpenBookQA rows into multiple-choice eval docs.

    Answer options are every column except ``query``, ``label`` and the
    harness-internal ``__few_shots`` marker. Emits ``query`` (Arabic
    prompt), ``choices`` and integer ``gold`` per row.
    """

    def _build(doc):
        excluded = {"query", "label", "__few_shots"}
        opts = [doc[name] for name in doc.keys() if name not in excluded]

        pieces = [
            "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n",
            f"السؤال: {doc['query']}\n",
        ]
        for pos, opt in enumerate(opts):
            pieces.append(f"{pos}) {opt}\n")
        pieces.append("الإجابة:")

        return {"query": "".join(pieces), "choices": opts, "gold": int(doc["label"])}

    return dataset.map(_build)
group: arabic_leaderboard_arabic_mt_piqa
task:
- arabic_mt_piqa
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_piqa
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: piqa_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import datasets
import numpy as np
def process_docs(dataset: "datasets.Dataset"):
    """Map translated-PIQA rows to multiple-choice docs for the harness.

    Choice columns are detected dynamically (everything except ``query``,
    ``label`` and ``__few_shots``) and rendered as an enumerated Arabic
    prompt; ``gold`` is the integer answer index from ``label``.
    """

    def _convert(doc):
        answer_index = int(doc["label"])
        skip = {"query", "label", "__few_shots"}
        choices = [value for key, value in doc.items() if key not in skip]

        prompt = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        prompt += f"السؤال: {doc['query']}\n"
        prompt += "".join(f"{n}) {c}\n" for n, c in enumerate(choices))
        prompt += "الإجابة:"

        return {"query": prompt, "choices": choices, "gold": answer_index}

    return dataset.map(_convert)
group: arabic_leaderboard_arabic_mt_race
task:
- arabic_mt_race
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_race
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: race_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import datasets
import numpy as np
def process_docs(dataset: "datasets.Dataset"):
    """Prepare translated-RACE rows as multiple-choice evaluation docs.

    All non-reserved columns (reserved: ``query``, ``label``,
    ``__few_shots``) become answer options; the returned rows carry the
    rendered Arabic ``query``, the ``choices`` list and the ``gold`` index.
    """
    instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"

    def _render(doc):
        options = [
            doc[column]
            for column in doc.keys()
            if column not in ("query", "label", "__few_shots")
        ]
        lines = [instruction, f"السؤال: {doc['query']}\n"]
        lines.extend(f"{idx}) {text}\n" for idx, text in enumerate(options))
        lines.append("الإجابة:")
        return {
            "query": "".join(lines),
            "choices": options,
            "gold": int(doc["label"]),
        }

    return dataset.map(_render)
group: arabic_leaderboard_arabic_mt_sciq
task:
- arabic_mt_sciq
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_sciq
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: sciq_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import random
import datasets
import numpy as np
def doc_to_text(doc):
    """Render the Arabic SciQ prompt (instruction, context, question).

    The trailing "possible answers:" header is completed by the harness,
    which appends the choice being scored.
    """
    instruction = (
        "بناءً على السياق أدناه، اختر الإجابة الصحيحة للسؤال التالي من قائمة الاقتراحات"
    )
    support = doc["support"]
    question = doc["question"]
    query = f"""{instruction}
السياق:
{support}
السؤال:
{question}
الإجابات المحتملة:
"""
    return query


def process_docs(dataset: "datasets.Dataset"):
    """Build shuffled multiple-choice docs from SciQ rows.

    Fixes two defects of the previous version:
    - it shuffled with the unseeded global ``random`` module, so choice
      order (and therefore the gold index and cached results) changed on
      every run; the shuffle is now seeded per-doc from the question text,
      making it deterministic across runs and processes.
    - ``choices.index(correct_answer)`` returned the first matching value,
      which mislabels gold when a distractor duplicates the correct answer;
      gold is now tracked by position instead of by value.
    """

    def _process_doc(doc):
        pool = [
            doc["distractor1"],
            doc["distractor2"],
            doc["distractor3"],
            doc["correct_answer"],
        ]
        # Deterministic per-doc shuffle: seeding Random with a str is
        # stable regardless of PYTHONHASHSEED.
        order = [0, 1, 2, 3]
        random.Random(doc["question"]).shuffle(order)
        choices = [pool[i] for i in order]
        # The correct answer sits wherever slot 3 of ``pool`` landed.
        answer_index = order.index(3)
        return {"query": doc_to_text(doc), "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
group: arabic_leaderboard_arabic_mt_toxigen
task:
- arabic_mt_toxigen
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_mt_toxigen
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: toxigen_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
import datasets
import numpy as np
def process_docs(dataset: "datasets.Dataset") -> "datasets.Dataset":
    """Map raw ToxiGen rows to yes/no multiple-choice docs.

    A row is labelled toxic (``gold`` = 1, i.e. "نعم"/yes) when the
    combined AI + human toxicity score exceeds 5.5; otherwise ``gold`` is
    0 ("لا"/no). Choices are always the fixed pair ["لا", "نعم"].
    """

    def _to_binary_mc(doc):
        statement = doc["text"]
        combined_score = doc["toxicity_ai"] + doc["toxicity_human"]
        gold = int(combined_score > 5.5)

        prompt = 'هل العبارة التالية تحض على الكراهية؟ قم بالرد بـ"نعم" أو "لا".'
        query = f"""{prompt}
العبارة:
'{statement}'
الإجابة:
"""
        return {"query": query, "choices": ["لا", "نعم"], "gold": gold}

    return dataset.map(_to_binary_mc)
group: arabic_leaderboard_acva
task:
- arabic_leaderboard_acva_Algeria
- arabic_leaderboard_acva_Ancient_Egypt
- arabic_leaderboard_acva_Arab_Empire
- arabic_leaderboard_acva_Arabic_Architecture
- arabic_leaderboard_acva_Arabic_Art
- arabic_leaderboard_acva_Arabic_Astronomy
- arabic_leaderboard_acva_Arabic_Calligraphy
- arabic_leaderboard_acva_Arabic_Ceremony
- arabic_leaderboard_acva_Arabic_Clothing
- arabic_leaderboard_acva_Arabic_Culture
- arabic_leaderboard_acva_Arabic_Food
- arabic_leaderboard_acva_Arabic_Funeral
- arabic_leaderboard_acva_Arabic_Geography
- arabic_leaderboard_acva_Arabic_History
- arabic_leaderboard_acva_Arabic_Language_Origin
- arabic_leaderboard_acva_Arabic_Literature
- arabic_leaderboard_acva_Arabic_Math
- arabic_leaderboard_acva_Arabic_Medicine
- arabic_leaderboard_acva_Arabic_Music
- arabic_leaderboard_acva_Arabic_Ornament
- arabic_leaderboard_acva_Arabic_Philosophy
- arabic_leaderboard_acva_Arabic_Physics_and_Chemistry
- arabic_leaderboard_acva_Arabic_Wedding
- arabic_leaderboard_acva_Bahrain
- arabic_leaderboard_acva_Comoros
- arabic_leaderboard_acva_Egypt_modern
- arabic_leaderboard_acva_InfluenceFromAncientEgypt
- arabic_leaderboard_acva_InfluenceFromByzantium
- arabic_leaderboard_acva_InfluenceFromChina
- arabic_leaderboard_acva_InfluenceFromGreece
- arabic_leaderboard_acva_InfluenceFromIslam
- arabic_leaderboard_acva_InfluenceFromPersia
- arabic_leaderboard_acva_InfluenceFromRome
- arabic_leaderboard_acva_Iraq
- arabic_leaderboard_acva_Islam_Education
- arabic_leaderboard_acva_Islam_branches_and_schools
- arabic_leaderboard_acva_Islamic_law_system
- arabic_leaderboard_acva_Jordan
- arabic_leaderboard_acva_Kuwait
- arabic_leaderboard_acva_Lebanon
- arabic_leaderboard_acva_Libya
- arabic_leaderboard_acva_Mauritania
- arabic_leaderboard_acva_Mesopotamia_civilization
- arabic_leaderboard_acva_Morocco
- arabic_leaderboard_acva_Oman
- arabic_leaderboard_acva_Palestine
- arabic_leaderboard_acva_Qatar
- arabic_leaderboard_acva_Saudi_Arabia
- arabic_leaderboard_acva_Somalia
- arabic_leaderboard_acva_Sudan
- arabic_leaderboard_acva_Syria
- arabic_leaderboard_acva_Tunisia
- arabic_leaderboard_acva_United_Arab_Emirates
- arabic_leaderboard_acva_Yemen
- arabic_leaderboard_acva_communication
- arabic_leaderboard_acva_computer_and_phone
- arabic_leaderboard_acva_daily_life
- arabic_leaderboard_acva_entertainment
aggregate_metric_list:
- metric: acc
aggregation: mean
weight_by_size: true
- metric: acc_norm
aggregation: mean
weight_by_size: true
metadata:
version: 1.0
task: arabic_leaderboard_acva_Algeria
dataset_path: OALL/ACVA
dataset_name: Algeria
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
sampler: first_n
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 1.0
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment