Commit e1ae8a2f authored by Herbie Bradley's avatar Herbie Bradley
Browse files

Merge remote-tracking branch 'origin/big-refactor' into calibration

parents 50e99bd7 30936bc7
# Generated by utils.py
dataset_name: unit_conversion_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_unit_conversion_multiple_choice
# Generated by utils.py
dataset_name: unit_interpretation_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_unit_interpretation_multiple_choice
# Generated by utils.py
dataset_name: unnatural_in_context_learning_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_unnatural_in_context_learning_multiple_choice
# Generated by utils.py
dataset_name: vitaminc_fact_verification_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_vitaminc_fact_verification_multiple_choice
# Generated by utils.py
dataset_name: what_is_the_tao_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_what_is_the_tao_multiple_choice
# Generated by utils.py
dataset_name: which_wiki_edit_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_which_wiki_edit_multiple_choice
# Generated by utils.py
dataset_name: winowhy_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_winowhy_multiple_choice
# Generated by utils.py
dataset_name: word_sorting_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_word_sorting_multiple_choice
# Generated by utils.py
dataset_name: word_unscrambling_zero_shot
include: ../multiple_choice_template_yaml
task: bigbench_word_unscrambling_multiple_choice
group: bigbench_multiple_choice
dataset_path: hails/bigbench
dataset_kwargs:
# num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods
# subtask_name: null
output_type: multiple_choice
test_split: default
doc_to_text: inputs
doc_to_target: "{{multiple_choice_targets.index(targets[0])}}"
doc_to_choice: "{{multiple_choice_targets}}"
metric_list:
- metric: acc
# TODO: brier score and other metrics
"""
A utility script that pushes all Bigbench subtasks from their form in the `bigbench` HF dataset
into `{org name}/bigbench`.
Prior to running, log into HF Hub for the target HF hub org via `huggingface-cli login`.
Requires the installation of
`pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"`
and is included so that the bigbench dependency can be avoided.
"""
from tqdm import tqdm
import datasets
import bigbench.api.util as bb_utils
all_task_names = bb_utils.get_all_json_task_names()

# Shot counts to export. The hub config-name suffix is derived from the
# count below, so further few-shot settings can be added to this list
# without touching the loop body.
num_shots = [0]

for shots in num_shots:
    # Matches the naming convention used by the task YAMLs
    # (e.g. "<task>_zero_shot" for the 0-shot configuration).
    suffix = "_zero_shot" if shots == 0 else f"_{shots}_shot"
    for task_name in tqdm(all_task_names):
        print(f"Loading '{task_name}' with num_shots={shots}...")
        task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots)

        print(f"Pushing '{task_name}' with num_shots={shots}...")
        task_ds.push_to_hub("hails/bigbench", task_name + suffix)

        # Release the dataset before loading the next subtask to keep
        # peak memory bounded across the (large) task list.
        del task_ds
# C-Eval (Validation)
### Paper
C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models
https://arxiv.org/pdf/2305.08322.pdf
C-Eval is a comprehensive Chinese evaluation suite for foundation models.
It consists of 13,948 multiple-choice questions spanning 52 diverse disciplines
and four difficulty levels.
Homepage: https://cevalbenchmark.com/
### Citation
```bibtex
@article{huang2023ceval,
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
journal={arXiv preprint arXiv:2305.08322},
year={2023}
}
```
# Mapping from each C-Eval subject's English identifier (used in dataset
# and task names) to its original Chinese display name.
SUBJECTS = {
    "computer_network": "计算机网络",
    "operating_system": "操作系统",
    "computer_architecture": "计算机组成",
    "college_programming": "大学编程",
    "college_physics": "大学物理",
    "college_chemistry": "大学化学",
    "advanced_mathematics": "高等数学",
    "probability_and_statistics": "概率统计",
    "discrete_mathematics": "离散数学",
    "electrical_engineer": "注册电气工程师",
    "metrology_engineer": "注册计量师",
    "high_school_mathematics": "高中数学",
    "high_school_physics": "高中物理",
    "high_school_chemistry": "高中化学",
    "high_school_biology": "高中生物",
    "middle_school_mathematics": "初中数学",
    "middle_school_biology": "初中生物",
    "middle_school_physics": "初中物理",
    "middle_school_chemistry": "初中化学",
    "veterinary_medicine": "兽医学",
    "college_economics": "大学经济学",
    "business_administration": "工商管理",
    "marxism": "马克思主义基本原理",
    "mao_zedong_thought": "毛泽东思想和中国特色社会主义理论体系概论",
    "education_science": "教育学",
    "teacher_qualification": "教师资格",
    "high_school_politics": "高中政治",
    "high_school_geography": "高中地理",
    "middle_school_politics": "初中政治",
    "middle_school_geography": "初中地理",
    "modern_chinese_history": "近代史纲要",
    "ideological_and_moral_cultivation": "思想道德修养与法律基础",
    "logic": "逻辑学",
    "law": "法学",
    "chinese_language_and_literature": "中国语言文学",
    "art_studies": "艺术学",
    "professional_tour_guide": "导游资格",
    "legal_professional": "法律职业资格",
    "high_school_chinese": "高中语文",
    "high_school_history": "高中历史",
    "middle_school_history": "初中历史",
    "civil_servant": "公务员",
    "sports_science": "体育学",
    "plant_protection": "植物保护",
    "basic_medicine": "基础医学",
    "clinical_medicine": "临床医学",
    "urban_and_rural_planner": "注册城乡规划师",
    "accountant": "注册会计师",
    "fire_engineer": "注册消防工程师",
    "environmental_impact_assessment_engineer": "环境影响评价工程师",
    "tax_accountant": "税务师",
    "physician": "医师资格",
}
# CMMLU
### Paper
CMMLU: Measuring massive multitask language understanding in Chinese
https://arxiv.org/abs/2306.09212
CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
Homepage: https://github.com/haonan-li/CMMLU
### Citation
```bibtex
@misc{li2023cmmlu,
title={CMMLU: Measuring massive multitask language understanding in Chinese},
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
year={2023},
eprint={2306.09212},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
### Groups and Tasks
#### Groups
- `ceval-valid`: All 52 subjects of the C-Eval dataset, evaluated following the methodology in MMLU's original implementation. This implementation consists solely of the validation set of C-Eval, as the test set requires submission of model predictions to an external site.
#### Tasks
The following tasks evaluate subjects in the C-Eval dataset using loglikelihood-based multiple-choice scoring:
- `ceval-valid_{subject_english}`
### Checklist
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation?
If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
group: ceval-valid
dataset_path: ceval/ceval-exam
validation_split: val
fewshot_split: dev
fewshot_config:
sampler: first_n
output_type: multiple_choice
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}"
metric_list:
- metric: acc
aggregation: mean
higher_is_better: true
- metric: acc_norm
aggregation: mean
higher_is_better: true
metadata:
version: "1.0"
"""
Take in a YAML, and output all other splits with this YAML
"""
import os
import yaml
import argparse
from tqdm import tqdm
from lm_eval.logger import eval_logger
# English-identifier -> Chinese display name for every C-Eval subject;
# iteration order drives the order in which per-subject YAMLs are emitted.
SUBJECTS = dict(
    computer_network="计算机网络",
    operating_system="操作系统",
    computer_architecture="计算机组成",
    college_programming="大学编程",
    college_physics="大学物理",
    college_chemistry="大学化学",
    advanced_mathematics="高等数学",
    probability_and_statistics="概率统计",
    discrete_mathematics="离散数学",
    electrical_engineer="注册电气工程师",
    metrology_engineer="注册计量师",
    high_school_mathematics="高中数学",
    high_school_physics="高中物理",
    high_school_chemistry="高中化学",
    high_school_biology="高中生物",
    middle_school_mathematics="初中数学",
    middle_school_biology="初中生物",
    middle_school_physics="初中物理",
    middle_school_chemistry="初中化学",
    veterinary_medicine="兽医学",
    college_economics="大学经济学",
    business_administration="工商管理",
    marxism="马克思主义基本原理",
    mao_zedong_thought="毛泽东思想和中国特色社会主义理论体系概论",
    education_science="教育学",
    teacher_qualification="教师资格",
    high_school_politics="高中政治",
    high_school_geography="高中地理",
    middle_school_politics="初中政治",
    middle_school_geography="初中地理",
    modern_chinese_history="近代史纲要",
    ideological_and_moral_cultivation="思想道德修养与法律基础",
    logic="逻辑学",
    law="法学",
    chinese_language_and_literature="中国语言文学",
    art_studies="艺术学",
    professional_tour_guide="导游资格",
    legal_professional="法律职业资格",
    high_school_chinese="高中语文",
    high_school_history="高中历史",
    middle_school_history="初中历史",
    civil_servant="公务员",
    sports_science="体育学",
    plant_protection="植物保护",
    basic_medicine="基础医学",
    clinical_medicine="临床医学",
    urban_and_rural_planner="注册城乡规划师",
    accountant="注册会计师",
    fire_engineer="注册消防工程师",
    environmental_impact_assessment_engineer="环境影响评价工程师",
    tax_accountant="税务师",
    physician="医师资格",
)
def parse_args():
    """Return parsed command-line options for the per-subject YAML generator."""
    parser = argparse.ArgumentParser()
    arg_specs = (
        ("--base_yaml_path", {"required": True}),
        ("--save_prefix_path", {"default": "ceval-valid"}),
        ("--cot_prompt_path", {"default": None}),
        ("--task_prefix", {"default": ""}),
    )
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()

    # Filename (not path) of the base YAML, referenced via `include:` from
    # every generated per-subject config.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path) as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path) as f:
            cot_file = json.load(f)

    for subject_eng, subject_zh in tqdm(SUBJECTS.items()):
        # A chain-of-thought prompt file, when supplied, overrides the
        # default zero-shot instruction (Chinese) for each subject.
        if args.cot_prompt_path is not None:
            description = cot_file[subject_eng]
        else:
            description = f"以下是中国关于{subject_zh}的单项选择题,请选出其中的正确答案。\n\n"

        task_name = (
            f"ceval-valid_{args.task_prefix}_{subject_eng}"
            if args.task_prefix != ""
            else f"ceval-valid_{subject_eng}"
        )
        yaml_dict = {
            "include": base_yaml_name,
            "task": task_name,
            "dataset_name": subject_eng,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
        eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
        with open(file_save_path, "w") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )
"dataset_name": "accountant"
"description": "以下是中国关于注册会计师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_accountant"
"dataset_name": "advanced_mathematics"
"description": "以下是中国关于高等数学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_advanced_mathematics"
"dataset_name": "art_studies"
"description": "以下是中国关于艺术学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_art_studies"
"dataset_name": "basic_medicine"
"description": "以下是中国关于基础医学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_basic_medicine"
"dataset_name": "business_administration"
"description": "以下是中国关于工商管理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_business_administration"
"dataset_name": "chinese_language_and_literature"
"description": "以下是中国关于中国语言文学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_chinese_language_and_literature"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment