Unverified Commit 8bc4afff authored by Boda Sadallah's avatar Boda Sadallah Committed by GitHub
Browse files

add arab_culture task (#3006)

* add arab_culture tasks

* add target_delimeter and remove debugging code
parent 5a481f43
"dataset_name": "UAE"
"include": "_default_arab_culture_mcq_template_yaml"
"tag": "arab_culture_gulf_tasks"
"task": "arab_culture_uae"
"task_alias": "UAE"
"dataset_name": "Yemen"
"include": "_default_arab_culture_mcq_template_yaml"
"tag": "arab_culture_gulf_tasks"
"task": "arab_culture_yemen"
"task_alias": "Yemen"
# Arabic prompt templates for the ArabCulture MCQ tasks.
# Placeholders ({country}, {region}, {first_statement}, {choices}) are
# filled in by doc_to_text() in utils.py.

# Arabic prompt used when both country and region context is added.
REGION_COUNTRY_PROMPT_AR = """
مهمتك هي اختيار الخيار الأنسب ثقافياً بناءً على السياق المقدم أدناه.
الموقع: {country}, {region}
الجملة: {first_statement}
يرجى مراعاة الفروق الثقافية للموقع المحدد واختيار الإجابة الأكثر ملاءمة من الخيارات المتاحة.
الخيارات:
{choices}
"""
# Arabic prompt used when only the region context is added.
REGION_PROMPT_AR = """
مهمتك هي اختيار الخيار الأنسب ثقافياً بناءً على السياق المقدم أدناه.
الموقع: {region}
الجملة: {first_statement}
يرجى مراعاة الفروق الثقافية للموقع المحدد واختيار الإجابة الأكثر ملاءمة من الخيارات المتاحة.
الخيارات:
{choices}
"""
# Arabic prompt used when no location context is added.
BASE_PROMPT_AR = """
مهمتك هي اختيار الخيار الأنسب ثقافياً بناءً على السياق المقدم أدناه.
الجملة: {first_statement}
يرجى مراعاة الفروق الثقافية واختيار الإجابة الأكثر ملاءمة من الخيارات المتاحة.
الخيارات:
{choices}
"""
# English prompt templates for the ArabCulture MCQ tasks (counterparts of the
# *_AR templates above); filled in by doc_to_text() in utils.py.

# English prompt used when both country and region context is added.
REGION_COUNTRY_PROMPT = """
You are tasked with selecting the most culturally appropriate option based on the context provided below.
Location: {country}, {region}
Statement: {first_statement}
Consider the cultural nuances of the specified location and choose the most suitable response from the options provided.
Options:
{choices}
"""
# English prompt used when only the region context is added.
REGION_PROMPT = """
You are tasked with selecting the most culturally appropriate option based on the context provided below.
Location: {region}
Statement: {first_statement}
Consider the cultural nuances of the specified location and choose the most suitable response from the options provided.
Options:
{choices}
"""
# English prompt used when no location context is added.
BASE_PROMPT = """
You are tasked with selecting the most culturally appropriate option based on the context provided below.
Statement: {first_statement}
Consider the cultural nuances and choose the most suitable response from the options provided.
Options:
{choices}
"""
# Jais chat-model system templates. The task prompt is substituted into
# {question}; applied by doc_to_text() only when MODEL_NAME contains both
# "jais" and "chat". These are the models' own system prompts — do not edit
# the wording. NOTE(review): the EN and AR texts describe different model
# sizes/builders (30b/Core42 vs 13B/Inception & MBZUAI); presumably copied
# from different Jais releases — confirm against the upstream model cards.
JAIS_CHAT_EN = """### Instruction: Your name is Jais, and you are named after Jebel Jais, the highest mountain in UAE. You are built by Core42. You are the world's most advanced Arabic large language model with 30b parameters. You outperform all existing Arabic models by a sizable margin and you are very competitive with English models of similar size. You can answer in Arabic and English only. You are a helpful, respectful and honest assistant. When answering, abide by the following guidelines meticulously: Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, explicit, offensive, toxic, dangerous, or illegal content. Do not give medical, legal, financial, or professional advice. Never assist in or promote illegal activities. Always encourage legal and responsible actions. Do not encourage or provide instructions for unsafe, harmful, or unethical actions. Do not create or share misinformation or fake news. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. Prioritize the well-being and the moral integrity of users. Avoid using toxic, derogatory, or offensive language. Maintain a respectful tone. Do not generate, promote, or engage in discussions about adult content. Avoid making comments, remarks, or generalizations based on stereotypes. Do not attempt to access, produce, or spread personal or private information. Always respect user confidentiality. Stay positive and do not say bad things about anything. Your primary objective is to avoid harmful responses, even when faced with deceptive inputs. 
Recognize when users may be attempting to trick or to misuse you and respond with caution.\n\nComplete the conversation below between [|Human|] and [|AI|]:\n### Input: [|Human|] {question}\n### Response: [|AI|]"""
# Arabic counterpart of JAIS_CHAT_EN; same {question} placeholder.
JAIS_CHAT_AR = """### Instruction: اسمك جيس وسميت على اسم جبل جيس اعلى جبل في الامارات. تم بنائك بواسطة Inception و MBZUAI. أنت نموذج اللغة العربية الأكثر تقدمًا في العالم مع بارامترات 13B. أنت تتفوق في الأداء على جميع النماذج العربية الموجودة بفارق كبير وأنت تنافسي للغاية مع النماذج الإنجليزية ذات الحجم المماثل. يمكنك الإجابة باللغتين العربية والإنجليزية فقط. أنت مساعد مفيد ومحترم وصادق. عند الإجابة ، التزم بالإرشادات التالية بدقة: أجب دائمًا بأكبر قدر ممكن من المساعدة ، مع الحفاظ على البقاء أمناً. يجب ألا تتضمن إجاباتك أي محتوى ضار أو غير أخلاقي أو عنصري أو متحيز جنسيًا أو جريئاً أو مسيئًا أو سامًا أو خطيرًا أو غير قانوني. لا تقدم نصائح طبية أو قانونية أو مالية أو مهنية. لا تساعد أبدًا في أنشطة غير قانونية أو تروج لها. دائما تشجيع الإجراءات القانونية والمسؤولة. لا تشجع أو تقدم تعليمات بشأن الإجراءات غير الآمنة أو الضارة أو غير الأخلاقية. لا تنشئ أو تشارك معلومات مضللة أو أخبار كاذبة. يرجى التأكد من أن ردودك غير متحيزة اجتماعيًا وإيجابية بطبيعتها. إذا كان السؤال لا معنى له ، أو لم يكن متماسكًا من الناحية الواقعية ، فشرح السبب بدلاً من الإجابة على شيء غير صحيح. إذا كنت لا تعرف إجابة السؤال ، فالرجاء عدم مشاركة معلومات خاطئة. إعطاء الأولوية للرفاهية والنزاهة الأخلاقية للمستخدمين. تجنب استخدام لغة سامة أو مهينة أو مسيئة. حافظ على نبرة محترمة. لا تنشئ أو تروج أو تشارك في مناقشات حول محتوى للبالغين. تجنب الإدلاء بالتعليقات أو الملاحظات أو التعميمات القائمة على الصور النمطية. لا تحاول الوصول إلى معلومات شخصية أو خاصة أو إنتاجها أو نشرها. احترم دائما سرية المستخدم. كن إيجابيا ولا تقل أشياء سيئة عن أي شيء. هدفك الأساسي هو تجنب الاجابات المؤذية ، حتى عند مواجهة مدخلات خادعة. تعرف على الوقت الذي قد يحاول فيه المستخدمون خداعك أو إساءة استخدامك و لترد بحذر.\n\nأكمل المحادثة أدناه بين [|Human|] و [|AI|]:\n### Input: [|Human|] {question}\n### Response: [|AI|]"""
import os
from lm_eval.tasks.arab_culture.prompts import (
BASE_PROMPT,
BASE_PROMPT_AR,
JAIS_CHAT_AR,
JAIS_CHAT_EN,
REGION_COUNTRY_PROMPT,
REGION_COUNTRY_PROMPT_AR,
REGION_PROMPT,
REGION_PROMPT_AR,
)
### Configuration flags read from environment variables. A flag is enabled
### iff its value is the literal string "True"; any other value disables it.
### BUGFIX: the defaults were previously the bool True, and `True == "True"`
### is always False, so the flags were silently disabled when unset. The
### default is now the string "True", matching the documented intent.

### Set this to "True" to add the country and region information to the prompt
COUNTRY = os.getenv("COUNTRY", "True") == "True"
### Set this to "True" to add the region information to the prompt
REGION = os.getenv("REGION", "True") == "True"
### Set this to "True" to use Arabic (vs English) answer keys and choices keys
ARABIC = os.getenv("ARABIC", "True") == "True"
### Get the model name; used to detect jais chat models in doc_to_text
MODEL_NAME = os.getenv("MODEL_NAME")
## Uncomment this to check if the environment variables are set correctly
# print(f"Task settings: COUNTRY: {COUNTRY}, REGION: {REGION}, ARABIC: {ARABIC}, MODEL_NAME: {MODEL_NAME}")
# English -> Arabic display names for the 13 countries and 4 regions covered
# by ArabCulture; used to localize the {country}/{region} prompt fields when
# the ARABIC flag is enabled.
en_ar_countries_regions = {
    "Egypt": "مصر",
    "Morocco": "المغرب",
    "Algeria": "الجزائر",
    "Libya": "ليبيا",
    "Sudan": "السودان",
    "Tunisia": "تونس",
    "Jordan": "الأردن",
    "Lebanon": "لبنان",
    "Syria": "سوريا",
    "Palestine": "فلسطين",
    "Yemen": "اليمن",
    "UAE": "الإمارات",
    "KSA": "السعودية",
    "Gulf": "الخليج",
    "Levant": "الشام",
    "North Africa": "شمال أفريقيا",
    "Nile Valley": "وادي النيل",
}
def doc_to_text(doc):
    """Render one ArabCulture document into an MCQ prompt string.

    The template is selected from the COUNTRY / REGION / ARABIC module flags,
    the statement and lettered options are substituted in, and jais chat
    models additionally get the prompt wrapped in the Jais chat template.

    Args:
        doc: a dataset row with "country", "region", "first_statement",
            and "options" (dict with "arabic_keys", "english_keys", "text").

    Returns:
        The fully formatted prompt string.
    """
    country = "" if not doc["country"] else doc["country"]
    region = "" if not doc["region"] else doc["region"]
    first_statement = doc["first_statement"].strip()

    ## We don't have a setting for only information about the country without the region
    if COUNTRY:
        assert REGION, (
            "If you want to add the country information, you must also add the region information"
        )

    ## convert country and region names to Arabic if the language is Arabic;
    ## .get() keeps the original string (e.g. "") when it has no mapping,
    ## instead of raising KeyError
    if ARABIC:
        country = en_ar_countries_regions.get(country, country)
        region = en_ar_countries_regions.get(region, region)

    choices = doc["options"]
    choices_str = ""
    # Render every option as "<key>. <text>" on its own line (previously
    # hard-coded to exactly 3 options).
    for i in range(len(choices["text"])):
        key = choices["arabic_keys"][i] if ARABIC else choices["english_keys"][i]
        choices_str += key + ". " + choices["text"][i].strip() + "\n"

    if COUNTRY and REGION:
        cur_prompt = REGION_COUNTRY_PROMPT_AR if ARABIC else REGION_COUNTRY_PROMPT
        doc_text = cur_prompt.format(
            country=country,
            region=region,
            first_statement=first_statement,
            choices=choices_str,
        )
    elif REGION:
        cur_prompt = REGION_PROMPT_AR if ARABIC else REGION_PROMPT
        doc_text = cur_prompt.format(
            region=region, first_statement=first_statement, choices=choices_str
        )
    else:
        cur_prompt = BASE_PROMPT_AR if ARABIC else BASE_PROMPT
        doc_text = cur_prompt.format(
            first_statement=first_statement, choices=choices_str
        )

    ### apply the jais chat template for jais chat models only
    if MODEL_NAME and "jais" in MODEL_NAME and "chat" in MODEL_NAME:
        if ARABIC:
            doc_text = JAIS_CHAT_AR.format(question=doc_text)
        else:
            doc_text = JAIS_CHAT_EN.format(question=doc_text)

    return doc_text
def doc_to_choice(doc):
    """Return the list of answer-key letters in the configured language."""
    key_field = "arabic_keys" if ARABIC else "english_keys"
    return doc["options"][key_field]
def doc_to_target(doc):
    """Return the gold answer key (whitespace-stripped) in the configured language."""
    answer_key = doc["answer_key"]
    if ARABIC:
        return answer_key["arabic_answer_key"].strip()
    return answer_key["english_answer_key"].strip()
# Arab Culture
### Paper
Title: Commonsense Reasoning in Arab Culture
Abstract: https://arxiv.org/abs/2502.12788
Despite progress in Arabic large language models, such as Jais and AceGPT, their evaluation on commonsense reasoning has largely relied on machine-translated datasets, which lack cultural depth and may introduce Anglocentric biases. Commonsense reasoning is shaped by geographical and cultural contexts, and existing English datasets fail to capture the diversity of the Arab world. To address this, we introduce \datasetname, a commonsense reasoning dataset in Modern Standard Arabic (MSA), covering cultures of 13 countries across the Gulf, Levant, North Africa, and the Nile Valley. The dataset was built from scratch by engaging native speakers to write and validate culturally relevant questions for their respective countries. \datasetname spans 12 daily life domains with 54 fine-grained subtopics, reflecting various aspects of social norms, traditions, and everyday experiences. Zero-shot evaluations show that open-weight language models with up to 32B parameters struggle to comprehend diverse Arab cultures, with performance varying across regions. These findings highlight the need for more culturally aware models and datasets tailored to the Arabic-speaking world.
Homepage: https://github.com/fajri91/ArabicCulture
### Citation
```
@misc{sadallah2025commonsensereasoningarabculture,
title={Commonsense Reasoning in Arab Culture},
author={Abdelrahman Sadallah and Junior Cedric Tonga and Khalid Almubarak and Saeed Almheiri and Farah Atif and Chatrine Qwaider and Karima Kadaoui and Sara Shatnawi and Yaser Alesh and Fajri Koto},
year={2025},
eprint={2502.12788},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2502.12788},
}
```
### There are two variants of this task: `arab_culture` and `arab_culture_completion`
- The `arab_culture` is the normal MCQ evaluation type, which appends the answers to the question and then measures the likelihood of the different choice markers (A,B,C or "أ","ب","ج"). For more info, follow the MMLU style [template](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/mmlu/default/_default_template_yaml#L7-L8)
- The `arab_culture_completion` does the evaluation in a sentence-completion manner, by appending each answer to the question separately and choosing the answer with the highest likelihood. See [this](https://github.com/EleutherAI/lm-evaluation-harness/blob/1f9bc88fe61f6bfa36f74e91ce3d59ab5685e4f1/lm_eval/tasks/arc/arc_easy.yaml#L10-L12) for more information
### Groups and Tasks
#### Groups
* `arabculture`: evaluates all ArabCulture tasks.
* `arab_culture_gulf`: evaluates Gulf countries ArabCulture tasks.
* `arab_culture_levant`: evaluates Levant countries ArabCulture tasks.
* `arab_culture_nile_valley`: evaluates Nile Valley countries ArabCulture tasks.
* `arab_culture_north_africa`: evaluates North Africa ArabCulture tasks.
### Evaluation modes
This benchmark allows for different evaluation settings by adding extra context for the model:
We have three settings:
* without any information
```
COUNTRY=False
REGION=False
```
* with only region information
```
COUNTRY=False
REGION=True
```
* with region and country information
```
COUNTRY=True
REGION=True
```
**Please set these flags as environment variables.**
* We also allow for prompting in English, which we found to achieve higher results on most of the evaluated models (please refer to our paper).
* To change the language of the prompt, Define the `ARABIC` environment variable.
aggregate_metric_list:
metric: acc
weight_by_size: true
group: arab_culture_completion
metadata:
description: Arab Culture tasks
version: 0
task:
- arab_culture_completion_gulf
- arab_culture_completion_levant
- arab_culture_completion_north_africa
- arab_culture_completion_nile_valley
aggregate_metric_list:
metric: acc
weight_by_size: true
group: arab_culture_completion_gulf
group_alias: Gulf
metadata:
description: arab Culture tasks
version: 0
task:
- arab_culture_completion_gulf_tasks
aggregate_metric_list:
metric: acc
weight_by_size: true
group: arab_culture_completion_levant
group_alias: Levant
metadata:
description: arab Culture tasks
version: 0
task:
- arab_culture_completion_levant_tasks
aggregate_metric_list:
metric: acc
weight_by_size: true
group: arab_culture_completion_nile_valley
group_alias: Nile Valley
metadata:
description: arab Culture tasks
version: 0
task:
- arab_culture_completion_nile_valley_tasks
aggregate_metric_list:
metric: acc
weight_by_size: true
group: arab_culture_completion_north_africa
group_alias: North Africa
metadata:
description: arab Culture tasks
version: 0
task:
- arab_culture_completion_north_africa_tasks
dataset_path: boda/arabic_cluture
test_split: test
fewshot_split: test
fewshot_config:
sampler: first_n
output_type: multiple_choice
doc_to_text: !function utils_completion.doc_to_text
doc_to_choice: !function utils_completion.doc_to_choice
doc_to_target: !function utils_completion.doc_to_target
target_delimiter: ""
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: 0.0
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import logging
import os
import yaml
from tqdm import tqdm
# Harness-wide logger name used by all lm-eval task generators.
eval_logger = logging.getLogger("lm-eval")

# Maps each country (also the HF dataset config name) to its Arab-world
# region; drives which per-country YAMLs are generated and which regional
# task tag each one receives.
countries = {
    "KSA": "Gulf",
    "UAE": "Gulf",
    "Yemen": "Gulf",
    "Lebanon": "Levant",
    "Syria": "Levant",
    "Palestine": "Levant",
    "Jordan": "Levant",
    "Tunisia": "North Africa",
    "Algeria": "North Africa",
    "Morocco": "North Africa",
    "Libya": "North Africa",
    "Egypt": "Nile Valley",
    "Sudan": "Nile Valley",
}

# Version stamp written into the metadata of every generated group YAML.
VERSION = 0
def parse_args():
    """Parse CLI options: the base template path and the output filename prefix."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--base_yaml_path",
        default="_default_arab_culture_completion_template_yaml",
    )
    arg_parser.add_argument(
        "--save_prefix_path",
        default="arab_culture_completion",
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse_args()

    # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    # with open(args.base_yaml_path, encoding="utf-8") as f:
    #     base_yaml = yaml.full_load(f)

    # Regions are collected in first-seen order while iterating the countries.
    ALL_REGIONS = []
    # Emit one per-country task YAML, tagged with its region's task tag so the
    # regional group configs can aggregate it.
    for country, region in tqdm(countries.items()):
        if region not in ALL_REGIONS:
            ALL_REGIONS.append(region)

        # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "tag": f"arab_culture_completion_{region.lower().replace(' ', '_')}_tasks",
            "task": f"arab_culture_completion_{country.lower().replace(' ', '_')}",
            "task_alias": country,
            "dataset_name": country,
            # "description": description,
        }

        file_save_path = (
            args.save_prefix_path
            + f"_{country.lower().replace(' ', '_').replace('(', '').replace(')', '')}.yaml"
        )
        eval_logger.info(f"Saving yaml for subset {country} to {file_save_path}")
        # default_style='"' forces every scalar to be double-quoted in the output.
        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                allow_unicode=True,
                default_style='"',
            )

    # Group names of the regional sub-benchmarks, referenced by the top-level
    # benchmark config written at the end.
    arab_culture_completion_regions = [
        f"arab_culture_completion_{region.lower().replace(' ', '_')}"
        for region in ALL_REGIONS
    ]
    file_save_path = args.save_prefix_path + ".yaml"
    eval_logger.info(f"Saving benchmark config to {file_save_path}")

    # Emit one group YAML per region; note the file name gets a leading "_"
    # (prepended to the whole path) at open() time.
    for region in ALL_REGIONS:
        file_save_path = (
            args.save_prefix_path + f"_{region.lower().replace(' ', '_')}.yaml"
        )
        eval_logger.info(f"Saving yaml for subset {region} to {file_save_path}")
        with open("_" + file_save_path, "w", encoding="utf-8") as yaml_file:
            yaml.dump(
                {
                    "group": f"arab_culture_completion_{region.lower().replace(' ', '_')}",
                    "group_alias": region,
                    "task": [
                        f"arab_culture_completion_{region.lower().replace(' ', '_')}_tasks"
                    ],
                    "aggregate_metric_list": {"metric": "acc", "weight_by_size": True},
                    "metadata": {
                        "description": "arab Culture tasks",
                        "version": VERSION,
                    },
                },
                yaml_file,
                indent=4,
                default_flow_style=False,
            )

    # Emit the top-level benchmark group that aggregates all regional groups.
    file_save_path = args.save_prefix_path + ".yaml"
    with open("_" + file_save_path, "w", encoding="utf-8") as yaml_file:
        yaml.dump(
            {
                "group": "arab_culture_completion",
                "task": arab_culture_completion_regions,
                "aggregate_metric_list": {"metric": "acc", "weight_by_size": True},
                "metadata": {"description": "Arab Culture tasks", "version": VERSION},
            },
            yaml_file,
            indent=4,
            default_flow_style=False,
        )
"dataset_name": "Algeria"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_north_africa_tasks"
"task": "arab_culture_completion_algeria"
"task_alias": "Algeria"
"dataset_name": "Egypt"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_nile_valley_tasks"
"task": "arab_culture_completion_egypt"
"task_alias": "Egypt"
"dataset_name": "Jordan"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_levant_tasks"
"task": "arab_culture_completion_jordan"
"task_alias": "Jordan"
"dataset_name": "KSA"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_gulf_tasks"
"task": "arab_culture_completion_ksa"
"task_alias": "KSA"
"dataset_name": "Lebanon"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_levant_tasks"
"task": "arab_culture_completion_lebanon"
"task_alias": "Lebanon"
"dataset_name": "Libya"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_north_africa_tasks"
"task": "arab_culture_completion_libya"
"task_alias": "Libya"
"dataset_name": "Morocco"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_north_africa_tasks"
"task": "arab_culture_completion_morocco"
"task_alias": "Morocco"
"dataset_name": "Palestine"
"include": "_default_arab_culture_completion_template_yaml"
"tag": "arab_culture_completion_levant_tasks"
"task": "arab_culture_completion_palestine"
"task_alias": "Palestine"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment