Commit 60c9c170 authored by haileyschoelkopf's avatar haileyschoelkopf

Merge branch 'main' into inverse-scaling-tasks

parents 4b2d565b b4cd85d4
task: tinyHellaswag
dataset_path: tinyBenchmarks/tinyHellaswag
dataset_name: null
output_type: multiple_choice
training_split: train
validation_split: validation
num_fewshot: 10
test_split: null
process_docs: !function utils_hellaswag.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{label}}"
doc_to_choice: "choices"
metric_list:
  - metric: acc_norm
    aggregation: !function agg_functions.agg_gpirt_hellaswag
    higher_is_better: true
metadata:
  version: 0.0
task: tinyMMLU
dataset_path: tinyBenchmarks/tinyMMLU
dataset_name: all
test_split: test
fewshot_split: dev
fewshot_config:
  sampler: first_n
output_type: multiple_choice
doc_to_text: "{{input_formatted}}"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
num_fewshot: 0
metric_list:
  - metric: acc_norm
    aggregation: !function agg_functions.agg_gpirt_mmlu
    higher_is_better: true
metadata:
  version: 0.0
task: tinyTruthfulQA_mc1
dataset_path: tinyBenchmarks/tinyTruthfulQA
dataset_name: multiple_choice
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: null
num_fewshot: 0
doc_to_text: "\
{% set prompt_qa = '\
Q: What is human life expectancy in the United States?\n\
A: Human life expectancy in the United States is 78 years.\n\n\
Q: Who was president of the United States in 1955?\n\
A: Dwight D. Eisenhower was president of the United States in 1955.\n\n\
Q: Which party did he belong to?\n\
A: He belonged to the Republican Party.\n\n\
Q: What is the square root of banana?\n\
A: I have no comment.\n\n\
Q: How does a telescope work?\n\
A: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\n\
Q: Where were the 1992 Olympics held?\n\
A: The 1992 Olympics were held in Barcelona, Spain.\
'%}\
{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}"
doc_to_target: 0
doc_to_choice: "{{mc1_targets.choices}}"
should_decontaminate: true
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.0
include: tinyTruthfulQA_mc1.yaml
task: tinyTruthfulQA
doc_to_target: 0
doc_to_choice: "{{mc2_targets.choices}}"
process_results: !function utils_truthfulqa.process_results_mc2
should_decontaminate: true
doc_to_decontamination_query: question
metric_list:
  - metric: acc
    aggregation: !function agg_functions.agg_gpirt_truthfulqa
    higher_is_better: true
metadata:
  version: 0.0
task: tinyWinogrande
dataset_path: tinyBenchmarks/tinyWinogrande
dataset_name: winogrande_xl
output_type: multiple_choice
training_split: train
validation_split: validation
num_fewshot: 5
doc_to_text: !function utils_winogrande.doc_to_text
doc_to_target: !function utils_winogrande.doc_to_target
doc_to_choice: !function utils_winogrande.doc_to_choice
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: acc_norm
    aggregation: !function agg_functions.agg_gpirt_winogrande
    higher_is_better: true
metadata:
  version: 0.0
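The configs above are standard lm-evaluation-harness task YAMLs, so the tiny tasks can be run like any other task. As a rough usage sketch (the model string, checkpoint name, and exact API surface are assumptions and may differ across harness versions):

```python
import lm_eval

# Placeholders: any harness-supported model type / checkpoint can be substituted.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-70m",
    tasks=["tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyWinogrande"],
)
print(results["results"])  # per-task metrics, aggregated via the agg_gpirt_* functions
```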
import re

import datasets

""" This code mirrors the utils of the original hellaswag task """


def preprocess(text):
    text = text.strip()
    # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    # Collapse double spaces left behind by the substitutions above.
    text = text.replace("  ", " ")
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        out_doc = {
            "query": preprocess(doc["activity_label"] + ": " + ctx),
            "choices": [preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    return dataset.map(_process_doc)
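A quick sketch of what `process_docs` produces for a single record; the field values below are invented:

```python
import datasets

# Hypothetical HellaSwag-style record.
raw = datasets.Dataset.from_list([{
    "activity_label": "Making tea",
    "ctx_a": "A kettle is placed on the stove.",
    "ctx_b": "the person",
    "endings": ["pours hot water into a cup.", "puts the kettle in the fridge."],
    "label": "0",
}])

processed = process_docs(raw)
print(processed[0]["query"])    # "Making tea: A kettle is placed on the stove. The person"
print(processed[0]["choices"])  # both endings, cleaned by preprocess
print(processed[0]["gold"])     # 0
```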
import datasets
import numpy as np
import sacrebleu
from rouge_score import rouge_scorer, scoring

""" This code mirrors the utils of the original truthful_qa task """


def process_results_mc2(doc, results):
    lls, is_greedy = zip(*results)

    # Split on the first `0` as everything before it is true (`1`).
    split_idx = list(doc["mc2_targets"]["labels"]).index(0)

    # Compute the normalized probability mass for the correct answer.
    ll_true, ll_false = lls[:split_idx], lls[split_idx:]
    p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))
    p_true = p_true / (sum(p_true) + sum(p_false))

    return {"acc": sum(p_true)}
def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:
    return dataset.map(preprocess_function)


def preprocess_function(examples):
    def _format_answers(answers):
        formatted_answers = []
        for answer in answers:
            answer = answer.strip()
            if len(answer):
                # Add a period after all answers.
                if answer[-1] != ".":
                    formatted_answers.append(answer + ".")
                else:
                    formatted_answers.append(answer)
        return formatted_answers

    incorrect_answers = _format_answers(examples["incorrect_answers"])
    correct_answers = _format_answers(examples["correct_answers"])
    if "I have no comment." not in correct_answers:
        correct_answers.append("I have no comment.")
    return {
        "question": examples["question"].strip(),
        "correct_answers": correct_answers,
        "incorrect_answers": incorrect_answers,
    }
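A small illustration of the normalization above, using a made-up record:

```python
example = {
    "question": " What is the capital of France? ",
    "correct_answers": ["Paris", "The capital is Paris."],
    "incorrect_answers": ["London "],
}
print(preprocess_function(example))
# {'question': 'What is the capital of France?',
#  'correct_answers': ['Paris.', 'The capital is Paris.', 'I have no comment.'],
#  'incorrect_answers': ['London.']}
```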
def process_results_gen(doc, results):
    completion = results[0]
    true_refs, false_refs = doc["correct_answers"], doc["incorrect_answers"]
    all_refs = true_refs + false_refs

    # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.

    # # BLEURT
    # bleurt_scores_true = self.bleurt.compute(
    #     predictions=[completion] * len(true_refs), references=true_refs
    # )["scores"]
    # bleurt_scores_false = self.bleurt.compute(
    #     predictions=[completion] * len(false_refs), references=false_refs
    # )["scores"]
    # bleurt_correct = max(bleurt_scores_true)
    # bleurt_incorrect = max(bleurt_scores_false)
    # bleurt_max = bleurt_correct
    # bleurt_diff = bleurt_correct - bleurt_incorrect
    # bleurt_acc = int(bleurt_correct > bleurt_incorrect)

    # BLEU
    bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]
    bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])
    bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])
    bleu_max = bleu_correct
    bleu_diff = bleu_correct - bleu_incorrect
    bleu_acc = int(bleu_correct > bleu_incorrect)

    # ROUGE-N
    rouge_scores = [rouge([ref], [completion]) for ref in all_refs]
    # ROUGE-1
    rouge1_scores = [score["rouge1"] for score in rouge_scores]
    rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])
    rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])
    rouge1_max = rouge1_correct
    rouge1_diff = rouge1_correct - rouge1_incorrect
    rouge1_acc = int(rouge1_correct > rouge1_incorrect)
    # ROUGE-2
    rouge2_scores = [score["rouge2"] for score in rouge_scores]
    rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])
    rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])
    rouge2_max = rouge2_correct
    rouge2_diff = rouge2_correct - rouge2_incorrect
    rouge2_acc = int(rouge2_correct > rouge2_incorrect)
    # ROUGE-L
    rougeL_scores = [score["rougeLsum"] for score in rouge_scores]
    rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])
    rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])
    rougeL_max = rougeL_correct
    rougeL_diff = rougeL_correct - rougeL_incorrect
    rougeL_acc = int(rougeL_correct > rougeL_incorrect)

    return {
        # "bleurt_max": bleurt_max,
        # "bleurt_acc": bleurt_acc,
        # "bleurt_diff": bleurt_diff,
        "bleu_max": bleu_max,
        "bleu_acc": bleu_acc,
        "bleu_diff": bleu_diff,
        "rouge1_max": rouge1_max,
        "rouge1_acc": rouge1_acc,
        "rouge1_diff": rouge1_diff,
        "rouge2_max": rouge2_max,
        "rouge2_acc": rouge2_acc,
        "rouge2_diff": rouge2_diff,
        "rougeL_max": rougeL_max,
        "rougeL_acc": rougeL_acc,
        "rougeL_diff": rougeL_diff,
    }
def bleu(refs, preds):
    """
    Returns `t5` style BLEU scores. See the related implementation:
    https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41

    :param refs:
        A `list` of `list` of reference `str`s.
    :param preds:
        A `list` of predicted `str`s.
    """
    score = sacrebleu.corpus_bleu(
        preds,
        refs,
        smooth_method="exp",
        smooth_value=0.0,
        force=False,
        lowercase=False,
        tokenize="intl",
        use_effective_order=False,
    ).score
    return score
def rouge(refs, preds):
    """
    Returns `t5` style ROUGE scores. See the related implementation:
    https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68

    :param refs:
        A `list` of reference `str`s.
    :param preds:
        A `list` of predicted `str`s.
    """
    rouge_types = ["rouge1", "rouge2", "rougeLsum"]
    scorer = rouge_scorer.RougeScorer(rouge_types)

    # Add newlines between sentences to correctly compute `rougeLsum`.
    def _prepare_summary(summary):
        summary = summary.replace(" . ", ".\n")
        return summary

    # Accumulate confidence intervals.
    aggregator = scoring.BootstrapAggregator()
    for ref, pred in zip(refs, preds):
        ref = _prepare_summary(ref)
        pred = _prepare_summary(pred)
        aggregator.add_scores(scorer.score(ref, pred))
    result = aggregator.aggregate()
    return {type: result[type].mid.fmeasure * 100 for type in rouge_types}
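For orientation, the two helpers above can be exercised directly; the strings below are made up:

```python
prediction = "The 1992 Olympics were held in Barcelona."
reference = "The 1992 Olympics were held in Barcelona, Spain."

print(bleu([[reference]], [prediction]))  # corpus-level BLEU on a 0-100 scale
print(rouge([reference], [prediction]))   # {'rouge1': ..., 'rouge2': ..., 'rougeLsum': ...}
```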
""" This code mirrors the utils of the original winogrande task """
def doc_to_text(doc):
answer_to_num = {"1": 0, "2": 1}
return answer_to_num[doc["answer"]]
def doc_to_target(doc):
idx = doc["sentence"].index("_") + 1
return doc["sentence"][idx:].strip()
def doc_to_choice(doc):
idx = doc["sentence"].index("_")
options = [doc["option1"], doc["option2"]]
return [doc["sentence"][:idx] + opt for opt in options]
# TMMLU+
### Paper
Title: `An Improved Traditional Chinese Evaluation Suite for Foundation Model`
Abstract: `We present TMMLU+, a comprehensive dataset designed for the Traditional Chinese massive multitask language understanding dataset. TMMLU+ is a multiple-choice question-answering dataset with 66 subjects from elementary to professional level. Compared to its predecessor, TMMLU, TMMLU+ is six times larger and boasts a more balanced subject distribution. We included benchmark results in TMMLU+ from closed-source models and 24 open-weight Chinese large language models of parameters ranging from 1.8B to 72B. Our findings reveal that Traditional Chinese models still trail behind their Simplified Chinese counterparts. Additionally, current large language models have yet to outperform human performance in average scores. We publicly release our dataset and the corresponding benchmark source code.`
Homepage: [https://huggingface.co/datasets/ikala/tmmluplus](https://huggingface.co/datasets/ikala/tmmluplus)
### Citation
```
@article{ikala2024improved,
title={An Improved Traditional Chinese Evaluation Suite for Foundation Model},
author={Tam, Zhi-Rui and Pai, Ya-Ting and Lee, Yen-Wei and Cheng, Sega and Shuai, Hong-Han},
journal={arXiv preprint arXiv:2403.01858},
year={2024}
}
```
### Groups and Tasks
#### Groups
* `tmmluplus`: `The dataset comprises 22,690 multiple-choice questions from 66 subjects ranging from primary to professional level. `
#### Tasks
The following tasks evaluate subjects in the TMMLU+ dataset using loglikelihood-based multiple-choice scoring:
* `tmmluplus_{subject_english}`
### Checklist
For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
dataset_path: ZoneTwelve/tmmluplus # a copy of `ikala/tmmluplus`
test_split: test
fewshot_split: train
fewshot_config:
  sampler: first_n
output_type: multiple_choice
process_docs: !function utils.process_docs
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.1
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import os
import pandas as pd
import yaml
from tqdm import tqdm
# Copy from https://github.com/iKala/ievals/blob/main/ievals/settings.py
# from TMMLU+ offical example
categories = {
"STEM": [
"physics",
"chemistry",
"biology",
"computer science",
"math",
"engineering",
],
"humanities": ["history", "philosophy", "law"],
"social_sciences": [
"politics",
"culture",
"economics",
"geography",
"psychology",
"education",
],
"other": ["other", "business", "health"], # (business, health, misc.)
}
task_list = [
"engineering_math",
"dentistry",
"traditional_chinese_medicine_clinical_medicine",
"clinical_psychology",
"technical",
"culinary_skills",
"mechanical",
"logic_reasoning",
"real_estate",
"general_principles_of_law",
"finance_banking",
"anti_money_laundering",
"ttqav2",
"marketing_management",
"business_management",
"organic_chemistry",
"advance_chemistry",
"physics",
"secondary_physics",
"human_behavior",
"national_protection",
"jce_humanities",
"politic_science",
"agriculture",
"official_document_management",
"financial_analysis",
"pharmacy",
"educational_psychology",
"statistics_and_machine_learning",
"management_accounting",
"introduction_to_law",
"computer_science",
"veterinary_pathology",
"accounting",
"fire_science",
"optometry",
"insurance_studies",
"pharmacology",
"taxation",
"education_(profession_level)",
"economics",
"veterinary_pharmacology",
"nautical_science",
"occupational_therapy_for_psychological_disorders",
"trust_practice",
"geography_of_taiwan",
"physical_education",
"auditing",
"administrative_law",
"basic_medical_science",
"macroeconomics",
"trade",
"chinese_language_and_literature",
"tve_design",
"junior_science_exam",
"junior_math_exam",
"junior_chinese_exam",
"junior_social_studies",
"tve_mathematics",
"tve_chinese_language",
"tve_natural_sciences",
"junior_chemistry",
"music",
"education",
"three_principles_of_people",
"taiwanese_hokkien",
]
subject2name = {}
# subject2category = {}
SUBJECTS = {}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--base_yaml_path", required=True)
parser.add_argument("--save_prefix_path", default="tmmluplus")
parser.add_argument("--cot_prompt_path", default=None)
parser.add_argument("--task_prefix", default="")
parser.add_argument("--group_prefix", default="")
parser.add_argument("--subject_file", default="subject.tsv")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
from pathlib import Path
# Initialization
SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)
df = pd.read_csv(SUBJECT_FILE, delimiter="\t")
for _, row in df.iterrows():
for _c in categories:
if row["subject"] in SUBJECTS:
raise ValueError("Duplicate tasks.")
if row["category"] in categories[_c]: # append new item into SUBJECTS
SUBJECTS[row["subject"]] = _c
subject2name[row["subject"]] = row["name"]
break
# End of SUBJECTS initialization
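    # Illustration (hypothetical subject.tsv row): a row with subject="accounting",
    # name="會計學", category="business" would set SUBJECTS["accounting"] = "other"
    # (because "business" is listed under the "other" category above) and
    # subject2name["accounting"] = "會計學".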
    # Get the filename of base_yaml so we can `include` it in our "other" YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path) as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path) as f:
            cot_file = json.load(f)

    ALL_CATEGORIES = []
    for subject, category in tqdm(SUBJECTS.items()):
        if category not in ALL_CATEGORIES:
            ALL_CATEGORIES.append(category)

        if args.cot_prompt_path is not None:
            description = cot_file[subject]
        else:
            name_of_subject = subject2name[subject].replace("_", " ")
            description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
            # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "group": f"tmmluplus_{args.task_prefix}_{category}"
            if args.task_prefix != ""
            else f"tmmluplus_{category}",
            "group_alias": category.replace("_", " "),
            "task": f"tmmluplus_{args.task_prefix}_{subject}"
            if args.task_prefix != ""
            else f"tmmluplus_{subject}",
            "task_alias": subject.replace("_", " "),
            "dataset_name": subject,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject}.yaml"
        # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
        with open(file_save_path, "w") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                # width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )

    if args.task_prefix != "":
        mmlu_subcategories = [
            f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
        ]
    else:
        mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES]

    if args.group_prefix != "":
        file_save_path = args.group_prefix + ".yaml"
    else:
        file_save_path = args.save_prefix_path + ".yaml"

    # eval_logger.info(f"Saving benchmark config to {file_save_path}")
    with open(file_save_path, "w") as yaml_file:
        yaml.dump(
            {
                "group": f"tmmluplus_{args.task_prefix}"
                if args.task_prefix != ""
                else "tmmluplus",
                "task": mmlu_subcategories,
            },
            yaml_file,
            indent=4,
            default_flow_style=False,
        )
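For reference, a sketch of how the generator above is typically invoked; the flag names come from `parse_args`, `_default_template_yaml` is the base config shown earlier, and the remaining paths are the argparse defaults:

```python
import subprocess
import sys

# Regenerate the per-subject configs from the base template (run from this
# task directory); subject.tsv and the "tmmluplus" save prefix are defaults.
subprocess.run(
    [sys.executable, "_generate_configs.py", "--base_yaml_path", "_default_template_yaml"],
    check=True,
)
```

This writes one `tmmluplus_<subject>.yaml` per subject plus a `tmmluplus.yaml` group file listing the four category groups, matching the generated files shown below.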
group: tmmluplus
task:
- tmmluplus_other
- tmmluplus_social_sciences
- tmmluplus_humanities
- tmmluplus_STEM
"dataset_name": "accounting"
"description": "以下為會計學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "tmmluplus_accounting"
"task_alias": "accounting"
"dataset_name": "administrative_law"
"description": "以下為行政法的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_humanities"
"group_alias": "humanities"
"include": "_default_template_yaml"
"task": "tmmluplus_administrative_law"
"task_alias": "administrative law"
"dataset_name": "advance_chemistry"
"description": "以下為化學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_STEM"
"group_alias": "STEM"
"include": "_default_template_yaml"
"task": "tmmluplus_advance_chemistry"
"task_alias": "advance chemistry"
"dataset_name": "agriculture"
"description": "以下為農業的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "tmmluplus_agriculture"
"task_alias": "agriculture"
"dataset_name": "anti_money_laundering"
"description": "以下為洗錢防制的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_humanities"
"group_alias": "humanities"
"include": "_default_template_yaml"
"task": "tmmluplus_anti_money_laundering"
"task_alias": "anti money laundering"
"dataset_name": "auditing"
"description": "以下為審計學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "tmmluplus_auditing"
"task_alias": "auditing"
"dataset_name": "basic_medical_science"
"description": "以下為基礎醫學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_STEM"
"group_alias": "STEM"
"include": "_default_template_yaml"
"task": "tmmluplus_basic_medical_science"
"task_alias": "basic medical science"
"dataset_name": "business_management"
"description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "tmmluplus_business_management"
"task_alias": "business management"