Unverified Commit bb5b46d7 authored by Yu Shi Jie, committed by GitHub

Merge branch 'EleutherAI:main' into mmlu-pro

parents 1e4e058c 6f7b4a05
@@ -354,11 +354,17 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
         # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
         if args.trust_remote_code:
-            os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = str(args.trust_remote_code)
-            args.model_args = (
-                args.model_args
-                + f",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}"
-            )
+            eval_logger.info(
+                "Passed `--trust_remote_code`, setting environment variable `HF_DATASETS_TRUST_REMOTE_CODE=true`"
+            )
+            # HACK: import datasets and override its HF_DATASETS_TRUST_REMOTE_CODE value internally,
+            # because it's already been determined based on the prior env var before launching our
+            # script--`datasets` gets imported by lm_eval internally before these lines can update the env.
+            import datasets
+
+            datasets.config.HF_DATASETS_TRUST_REMOTE_CODE = True
+
+            args.model_args = args.model_args + ",trust_remote_code=True"

         eval_logger.info(f"Selected Tasks: {task_names}")
...
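For readers unfamiliar with the timing issue the HACK comment describes: `datasets` snapshots `HF_DATASETS_TRUST_REMOTE_CODE` from the environment when it is first imported, so a later `os.environ` write is invisible to it. A minimal standalone sketch of the failure mode and the workaround (an illustration, not part of the patch):

```python
import os

# `datasets` is imported (transitively, via lm_eval) before we get a chance
# to set the environment variable, so its config is already decided here.
import datasets

os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = "true"  # too late: read at import
print(datasets.config.HF_DATASETS_TRUST_REMOTE_CODE)  # still reflects the old env

# The workaround from the patch: override the already-imported module's flag.
datasets.config.HF_DATASETS_TRUST_REMOTE_CODE = True
```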
@@ -271,6 +271,7 @@ def simple_evaluate(
         model_args=model_args,
         system_instruction=system_instruction,
         chat_template=lm.chat_template if apply_chat_template else None,
+        fewshot_as_multiturn=fewshot_as_multiturn,
     )

     results = evaluate(
...
@@ -48,6 +48,7 @@ class GeneralConfigTracker:
     model_name_sanitized: str = None
     system_instruction: str = None
     system_instruction_sha: str = None
+    fewshot_as_multiturn: bool = None
     chat_template: str = None
     chat_template_sha: str = None
     start_time: float = None
@@ -80,6 +81,7 @@ class GeneralConfigTracker:
         model_args: str,
         system_instruction: str,
         chat_template: str,
+        fewshot_as_multiturn: bool,
     ) -> None:
         """Logs model parameters and job ID."""
         self.model_source = model_source
@@ -91,6 +93,7 @@ class GeneralConfigTracker:
         )
         self.chat_template = chat_template
         self.chat_template_sha = hash_string(chat_template) if chat_template else None
+        self.fewshot_as_multiturn = fewshot_as_multiturn

     def log_end_time(self) -> None:
         """Logs the end time of the evaluation and calculates the total evaluation time."""
...
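Putting the two hunks together, the new flag is threaded from `simple_evaluate` through to the experiment logger; a sketch of the resulting call shape (the `evaluation_tracker` attribute path is assumed from the harness):

```python
# Assumed call site inside simple_evaluate() after this change.
evaluation_tracker.general_config_tracker.log_experiment_args(
    model_source=model,
    model_args=model_args,
    system_instruction=system_instruction,
    chat_template=lm.chat_template if apply_chat_template else None,
    fewshot_as_multiturn=fewshot_as_multiturn,
)
```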
@@ -288,7 +288,7 @@ class NEURON_HF(TemplateLM):
         self.vocab_size = self.tokenizer.vocab_size
         self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
-        self.add_bos_token = self.add_bos_token
+        self.add_bos_token = add_bos_token

         self._max_length = max_length
...
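The one-word fix above repairs a self-assignment: the old line bound the attribute to itself instead of to the `add_bos_token` constructor argument, so the argument was dropped. A minimal repro with a hypothetical class:

```python
class Example:
    def __init__(self, add_bos_token: bool = False):
        # Buggy version: reads an attribute that was never set (AttributeError),
        # or, if a parent class set it earlier, silently ignores the argument.
        # self.add_bos_token = self.add_bos_token
        self.add_bos_token = add_bos_token  # fixed: bind the constructor argument
```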
@@ -11,6 +11,7 @@
 | [aexams](aexams/README.md) | Tasks in Arabic related to various academic exams covering a range of subjects. | Arabic |
 | [agieval](agieval/README.md) | Tasks involving historical data or questions related to history and historical texts. | English, Chinese |
 | [anli](anli/README.md) | Adversarial natural language inference tasks designed to test model robustness. | English |
+| [arabicmmlu](arabicmmlu/README.md) | Localized Arabic version of MMLU with multiple-choice questions from 40 subjects. | Arabic |
 | [arc](arc/README.md) | Tasks involving complex reasoning over a diverse set of questions. | English |
 | [arithmetic](arithmetic/README.md) | Tasks involving numerical computations and arithmetic reasoning. | English |
 | [asdiv](asdiv/README.md) | Tasks involving arithmetic and mathematical reasoning challenges. | English |
@@ -19,6 +20,7 @@
 | [bbh](bbh/README.md) | Tasks focused on deep semantic understanding through hypothesization and reasoning. | English, German |
 | [belebele](belebele/README.md) | Language understanding tasks in a variety of languages and scripts. | Multiple (122 languages) |
 | benchmarks | General benchmarking tasks that test a wide range of language understanding capabilities. | |
+| [bertaqa](bertaqa/README.md) | Local Basque cultural trivia QA tests in English and Basque. | English, Basque, Basque (MT) |
 | [bigbench](bigbench/README.md) | Broad tasks from the BIG-bench benchmark designed to push the boundaries of large models. | Multiple |
 | [blimp](blimp/README.md) | Tasks testing grammatical phenomena to evaluate language model's linguistic capabilities. | English |
 | [ceval](ceval/README.md) | Tasks that evaluate language understanding and reasoning in an educational context. | Chinese |
@@ -70,6 +72,7 @@
 | okapi/mmlu_multilingual | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (34 languages) |
 | [okapi/truthfulqa_multilingual](okapi/truthfulqa_multilingual/README.md) | Tasks that involve reading comprehension and information retrieval challenges. | Multiple (31 languages) |
 | [openbookqa](openbookqa/README.md) | Open-book question answering tasks that require external knowledge and reasoning. | English |
+| [paloma](paloma/README.md) | Paloma is a comprehensive benchmark designed to evaluate open language models across a wide range of domains, ranging from niche artist communities to mental health forums on Reddit. | English |
 | [paws-x](paws-x/README.md) | Paraphrase Adversaries from Word Scrambling, focusing on cross-lingual capabilities. | English, French, Spanish, German, Chinese, Japanese, Korean |
 | [pile](pile/README.md) | Open source language modelling data set that consists of 22 smaller, high-quality datasets. | English |
 | [pile_10k](pile_10k/README.md) | The first 10K elements of The Pile, useful for debugging models trained on it. | English |
...
@@ -312,8 +312,13 @@ class TaskManager:
         :return
             Dictionary of task names as key and task metadata
         """
+        ignore_dirs = [
+            "__pycache__",
+            ".ipynb_checkpoints",
+        ]
         tasks_and_groups = collections.defaultdict()
-        for root, _, file_list in os.walk(task_dir):
+        for root, dirs, file_list in os.walk(task_dir):
+            dirs[:] = [d for d in dirs if d not in ignore_dirs]
             for f in file_list:
                 if f.endswith(".yaml"):
                     yaml_path = os.path.join(root, f)
...
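The `dirs[:] = ...` slice assignment is what makes the pruning effective: `os.walk` re-reads the same list object to decide which subdirectories to descend into, so the list must be mutated in place rather than rebound. A standalone illustration:

```python
import os

ignore_dirs = {"__pycache__", ".ipynb_checkpoints"}
for root, dirs, files in os.walk("lm_eval/tasks"):
    # In-place mutation: os.walk skips the removed entries entirely.
    dirs[:] = [d for d in dirs if d not in ignore_dirs]
    # `dirs = [...]` would only rebind the local name; os.walk would still
    # descend into __pycache__ and .ipynb_checkpoints directories.
```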
# ArabicMMLU

### Paper

Title: ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic

Abstract: https://arxiv.org/abs/2402.12840

The focus of language model evaluation has transitioned towards reasoning and knowledge-intensive tasks, driven by advancements in pretraining large models. While state-of-the-art models are partially trained on large Arabic texts, evaluating their performance in Arabic remains challenging due to the limited availability of relevant datasets. To bridge this gap, we present ArabicMMLU, the first multi-task language understanding benchmark for the Arabic language, sourced from school exams across diverse educational levels in different countries spanning North Africa, the Levant, and the Gulf regions. Our data comprises 40 tasks and 14,575 multiple-choice questions in Modern Standard Arabic (MSA), and is carefully constructed by collaborating with native speakers in the region. Our comprehensive evaluations of 35 models reveal substantial room for improvement, particularly among the best open-source models. Notably, BLOOMZ, mT0, LLaMA2, and Falcon struggle to achieve a score of 50%, while even the top-performing Arabic-centric model only achieves a score of 62.3%.

The authors of the paper conducted studies by varying the language of the initial prompt and answer keys between English and Arabic. However, they set English initial prompts and answer keys as the standard, which is the version implemented in this task.

Homepage: https://github.com/mbzuai-nlp/ArabicMMLU

### Citation

```
@misc{koto2024arabicmmlu,
      title={ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic},
      author={Fajri Koto and Haonan Li and Sara Shatnawi and Jad Doughman and Abdelrahman Boda Sadallah and Aisha Alraeesi and Khalid Almubarak and Zaid Alyafeai and Neha Sengupta and Shady Shehata and Nizar Habash and Preslav Nakov and Timothy Baldwin},
      year={2024},
      eprint={2402.12840},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

* `arabicmmlu`: evaluates all ArabicMMLU tasks.
* `arabicmmlu_stem`: evaluates STEM ArabicMMLU tasks.
* `arabicmmlu_social_science`: evaluates social science ArabicMMLU tasks.
* `arabicmmlu_humanities`: evaluates humanities ArabicMMLU tasks.
* `arabicmmlu_language`: evaluates Arabic language ArabicMMLU tasks.
* `arabicmmlu_other`: evaluates other ArabicMMLU tasks.
\ No newline at end of file
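With these groups registered, a run can select the whole benchmark or a single category by name; a hedged invocation sketch (the model arguments are placeholders):

```
lm_eval --model hf --model_args pretrained=<your-model> --tasks arabicmmlu
```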
dataset_path: yazeed7/ArabicMMLU
test_split: test
fewshot_split: dev
fewshot_config:
  sampler: first_n
output_type: multiple_choice
doc_to_text: !function utils.doc_to_text
doc_to_choice: !function utils.doc_to_choice
doc_to_target: "Answer Key"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
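The template binds its prompt logic via `!function`; the actual `utils.py` is not shown in this excerpt. A hypothetical sketch of the two helpers, assuming `Question` and `Option 1`–`Option 5` column names (only `Answer Key` is confirmed by the template above):

```python
# Hypothetical helpers for _default_template_yaml; column names other than
# "Answer Key" are assumptions about the dataset schema, not the real utils.py.
def doc_to_text(doc: dict) -> str:
    options = [doc[f"Option {i}"] for i in range(1, 6) if doc.get(f"Option {i}")]
    lettered = "\n".join(f"{chr(ord('A') + i)}. {o}" for i, o in enumerate(options))
    return f"Question: {doc['Question']}\n{lettered}\nAnswer:"


def doc_to_choice(doc: dict) -> list:
    n = sum(1 for i in range(1, 6) if doc.get(f"Option {i}"))
    return ["A", "B", "C", "D", "E"][:n]
```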
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import logging
import os
import yaml
from tqdm import tqdm
eval_logger = logging.getLogger("lm-eval")
SUBJECTS = {
"Driving Test": "other",
"High Geography": "social_science",
"High History": "humanities",
"Islamic Studies": "humanities",
"Univ Accounting": "social_science",
"Primary General Knowledge": "other",
"Univ Political Science": "social_science",
"Primary Math": "stem",
"Middle General Knowledge": "other",
"High Biology": "stem",
"Primary Natural Science": "stem",
"High Economics": "social_science",
"Middle Natural Science": "stem",
"Middle Geography": "social_science",
"Primary Social Science": "social_science",
"Middle Computer Science": "stem",
"Middle Islamic Studies": "humanities",
"Primary Computer Science": "stem",
"High Physics": "stem",
"Middle Social Science": "social_science",
"Middle Civics": "social_science",
"High Computer Science": "stem",
"General Knowledge": "other",
"High Civics": "social_science",
"Prof Law": "humanities",
"High Islamic Studies": "humanities",
"Primary Arabic Language": "language",
"High Arabic Language": "language",
"Arabic Language (Grammar)": "language",
"Primary History": "humanities",
"Middle History": "humanities",
"Univ Economics": "social_science",
"Arabic Language (General)": "language",
"Univ Computer Science": "stem",
"Primary Islamic Studies": "humanities",
"Primary Geography": "social_science",
"High Philosophy": "humanities",
"Middle Arabic Language": "language",
"Middle Economics": "social_science",
"Univ Management": "other",
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--base_yaml_path", default="_default_template_yaml")
parser.add_argument("--save_prefix_path", default="arabicmmlu")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
# get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
base_yaml_name = os.path.split(args.base_yaml_path)[-1]
with open(args.base_yaml_path, encoding="utf-8") as f:
base_yaml = yaml.full_load(f)
ALL_CATEGORIES = []
for subject, category in tqdm(SUBJECTS.items()):
if category not in ALL_CATEGORIES:
ALL_CATEGORIES.append(category)
# description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"
yaml_dict = {
"include": base_yaml_name,
"group": f"arabicmmlu_{category}",
"group_alias": category.replace("_", " "),
"task": f"arabicmmlu_{subject.lower().replace(' ', '_')}",
"task_alias": subject,
"dataset_name": subject,
# "description": description,
}
file_save_path = args.save_prefix_path + f"_{subject.lower().replace(' ', '_').replace('(', '').replace(')', '')}.yaml"
eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
with open(file_save_path, "w", encoding="utf-8") as yaml_file:
yaml.dump(
yaml_dict,
yaml_file,
allow_unicode=True,
default_style='"',
)
arabicmmlu_subcategories = [f"arabicmmlu_{category}" for category in ALL_CATEGORIES]
file_save_path = args.save_prefix_path + ".yaml"
eval_logger.info(f"Saving benchmark config to {file_save_path}")
with open(file_save_path, "w", encoding="utf-8") as yaml_file:
yaml.dump(
{
"group": "arabicmmlu",
"task": arabicmmlu_subcategories,
},
yaml_file,
indent=4,
default_flow_style=False,
)
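Run from the task directory to regenerate the per-subject YAMLs and the benchmark group file; with the argparse defaults above, the command is simply:

```
python _generate_configs.py --base_yaml_path _default_template_yaml --save_prefix_path arabicmmlu
```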
group: arabicmmlu
task:
- arabicmmlu_other
- arabicmmlu_social_science
- arabicmmlu_humanities
- arabicmmlu_stem
- arabicmmlu_language
"dataset_name": "Arabic Language (General)"
"group": "arabicmmlu_language"
"group_alias": "language"
"include": "_default_template_yaml"
"task": "arabicmmlu_arabic_language_(general)"
"task_alias": "Arabic Language (General)"
"dataset_name": "Arabic Language (Grammar)"
"group": "arabicmmlu_language"
"group_alias": "language"
"include": "_default_template_yaml"
"task": "arabicmmlu_arabic_language_(grammar)"
"task_alias": "Arabic Language (Grammar)"
"dataset_name": "Driving Test"
"group": "arabicmmlu_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "arabicmmlu_driving_test"
"task_alias": "Driving Test"
"dataset_name": "General Knowledge"
"group": "arabicmmlu_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "arabicmmlu_general_knowledge"
"task_alias": "General Knowledge"
"dataset_name": "High Arabic Language"
"group": "arabicmmlu_language"
"group_alias": "language"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_arabic_language"
"task_alias": "High Arabic Language"
"dataset_name": "High Biology"
"group": "arabicmmlu_stem"
"group_alias": "stem"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_biology"
"task_alias": "High Biology"
"dataset_name": "High Civics"
"group": "arabicmmlu_social_science"
"group_alias": "social science"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_civics"
"task_alias": "High Civics"
"dataset_name": "High Computer Science"
"group": "arabicmmlu_stem"
"group_alias": "stem"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_computer_science"
"task_alias": "High Computer Science"
"dataset_name": "High Economics"
"group": "arabicmmlu_social_science"
"group_alias": "social science"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_economics"
"task_alias": "High Economics"
"dataset_name": "High Geography"
"group": "arabicmmlu_social_science"
"group_alias": "social science"
"include": "_default_template_yaml"
"task": "arabicmmlu_high_geography"
"task_alias": "High Geography"