"test/vscode:/vscode.git/clone" did not exist on "88578483244bd698020e886e42b525a9172bae8b"
Commit a1757547 authored by Yen-Ting Lin

add pega eval and legal eval

parent 13faf0e8
dataset_path: yentinglin/legal_benchmark
test_split: train
output_type: multiple_choice
process_docs: !function utils.process_docs
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.1
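
The `doc_to_text` field above is rendered as a Jinja2 template against each processed document. The sketch below shows that rendering outside the harness, with a made-up example row (the question/choices values are invented for illustration only):

```python
# Standalone illustration only: render the doc_to_text template from the config
# above against one made-up document shaped like utils.process_docs output.
from jinja2 import Template

doc_to_text = (
    "{{question.strip()}}\n"
    "A. {{choices[0]}}\nB. {{choices[1]}}\n"
    "C. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
)

doc = {
    "question": "下列何者屬於關聯式資料庫?",  # invented example row
    "choices": ["MySQL", "HTTP", "SSH", "JSON"],
    "answer": "A",
}

print(Template(doc_to_text).render(**doc))
# 下列何者屬於關聯式資料庫?
# A. MySQL
# B. HTTP
# C. SSH
# D. JSON
# Answer:
```

With `output_type: multiple_choice`, the harness then scores each `doc_to_choice` letter as a continuation of this prompt and compares the best-scoring letter against `doc_to_target`.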
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import os
import pandas as pd
import yaml
from tqdm import tqdm
categories = {
    "software": [
        "default",
    ],
}

task_list = [
    "default",
]
subject2name = {}
SUBJECTS = {}
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", default="_default_template_yaml")
    parser.add_argument("--save_prefix_path", default="pega")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    parser.add_argument("--group_prefix", default="")
    parser.add_argument("--subject_file", default="../subject.tsv")
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()

    from pathlib import Path

    # Initialization
    SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)

    df = pd.read_csv(SUBJECT_FILE, delimiter="\t")

    for _, row in df.iterrows():
        for _c in categories:
            if row["subject"] in SUBJECTS:
                raise ValueError(f"Duplicate tasks. {row['subject']} already exists.")
            if row["category"] in categories[_c]:  # append new item into SUBJECTS
                SUBJECTS[row["subject"]] = _c
                subject2name[row["subject"]] = row["name"]
                break
    # End of SUBJECTS initialization

    # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path) as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path) as f:
            cot_file = json.load(f)

    ALL_CATEGORIES = []
    for subject, category in tqdm(SUBJECTS.items()):
        if category not in ALL_CATEGORIES:
            ALL_CATEGORIES.append(category)

        if args.cot_prompt_path is not None:
            description = cot_file[subject]
        else:
            name_of_subject = subject2name[subject].replace("_", " ")
            description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
            # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "group": f"pega_{args.task_prefix}_{category}"
            if args.task_prefix != ""
            else f"pega_{category}",
            "group_alias": category.replace("_", " "),
            "task": f"pega_{args.task_prefix}_{subject}"
            if args.task_prefix != ""
            else f"pega_{subject}",
            "task_alias": subject.replace("_", " "),
            "dataset_name": subject,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject}.yaml"
        # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
        with open(file_save_path, "w") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                # width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )

    if args.task_prefix != "":
        mmlu_subcategories = [
            f"pega_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
        ]
    else:
        mmlu_subcategories = [f"pega_{category}" for category in ALL_CATEGORIES]

    if args.group_prefix != "":
        file_save_path = args.group_prefix + ".yaml"
    else:
        file_save_path = args.save_prefix_path + ".yaml"

    # eval_logger.info(f"Saving benchmark config to {file_save_path}")
    with open(file_save_path, "w") as yaml_file:
        yaml.dump(
            {
                "group": f"pega_{args.task_prefix}"
                if args.task_prefix != ""
                else "pega",
                "task": mmlu_subcategories,
            },
            yaml_file,
            indent=4,
            default_flow_style=False,
        )
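
A sketch of regenerating the configs with this script. The file name `_generate_configs.py` and the task directory are assumptions for illustration; the commit itself does not show file paths, and only the CLI flags come from `parse_args()` above:

```python
# Assumed file/directory names; only the CLI flags are taken from the script above.
import subprocess
from pathlib import Path

task_dir = Path("lm_eval/tasks/pega")  # assumed location of the script and template
subprocess.run(
    [
        "python",
        "_generate_configs.py",        # assumed name of the generator script
        "--base_yaml_path", "_default_template_yaml",
        "--save_prefix_path", "pega",
    ],
    cwd=task_dir,
    check=True,
)

# With a subject.tsv that only lists the "default" subject, this produces
# pega_default.yaml (per-subject config) and pega.yaml (group config):
print(sorted(p.name for p in task_dir.glob("pega*.yaml")))
```

The two generated files are what appears next in the commit.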
group: pega
task:
- pega_software
"dataset_name": "default"
"description": "以下為軟體的單選題,請提供正確答案的選項。\n\n"
"group": "pega_software"
"group_alias": "software"
"include": "_default_template_yaml"
"task": "pega_default"
"task_alias": "default"
import datasets


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _helper(doc):
        # modifies the contents of a single
        # document in our dataset.
        answer_list = ["A", "B", "C", "D"]
        choices = [doc["A"], doc["B"], doc["C"], doc["D"]]
        out_doc = {
            "questions": doc["question"],
            "choices": choices,
            "goal": answer_list.index(doc["answer"]),
        }
        return out_doc

    return dataset.map(_helper)  # returns back a datasets.Dataset object
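
`process_docs` keeps the original columns and adds `choices` (plus the currently unused `questions` and `goal` fields) to every row. A quick sanity check on an in-memory dataset, assuming this file is saved as `utils.py` next to the template; the row values below are invented:

```python
# Illustration only: run process_docs on a tiny in-memory dataset whose column
# names (question, A-D, answer) match what _helper expects; the values are made up.
import datasets
from utils import process_docs

raw = datasets.Dataset.from_dict(
    {
        "question": ["下列何者屬於作業系統?"],
        "A": ["Linux"],
        "B": ["HTTP"],
        "C": ["SQL"],
        "D": ["JSON"],
        "answer": ["A"],
    }
)

processed = process_docs(raw)
print(processed[0]["choices"])  # ['Linux', 'HTTP', 'SQL', 'JSON']
print(processed[0]["goal"])     # 0, the index of answer "A"
```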
subject name category
default 軟體 default
dataset_path: yentinglin/PegaEval
test_split: train
output_type: multiple_choice
process_docs: !function utils.process_docs
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.1
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import os
import pandas as pd
import yaml
from tqdm import tqdm
categories = {
    "law": [
        "default",
    ],
}

task_list = [
    "default",
]
subject2name = {}
SUBJECTS = {}
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", default="_default_template_yaml")
    parser.add_argument("--save_prefix_path", default="tw_legal")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    parser.add_argument("--group_prefix", default="")
    parser.add_argument("--subject_file", default="../subject.tsv")
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()

    from pathlib import Path

    # Initialization
    SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)

    df = pd.read_csv(SUBJECT_FILE, delimiter="\t")

    for _, row in df.iterrows():
        for _c in categories:
            if row["subject"] in SUBJECTS:
                raise ValueError(f"Duplicate tasks. {row['subject']} already exists.")
            if row["category"] in categories[_c]:  # append new item into SUBJECTS
                SUBJECTS[row["subject"]] = _c
                subject2name[row["subject"]] = row["name"]
                break
    # End of SUBJECTS initialization

    # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path) as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path) as f:
            cot_file = json.load(f)

    ALL_CATEGORIES = []
    for subject, category in tqdm(SUBJECTS.items()):
        if category not in ALL_CATEGORIES:
            ALL_CATEGORIES.append(category)

        if args.cot_prompt_path is not None:
            description = cot_file[subject]
        else:
            name_of_subject = subject2name[subject].replace("_", " ")
            description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
            # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "group": f"tw_legal_{args.task_prefix}_{category}"
            if args.task_prefix != ""
            else f"tw_legal_{category}",
            "group_alias": category.replace("_", " "),
            "task": f"tw_legal_{args.task_prefix}_{subject}"
            if args.task_prefix != ""
            else f"tw_legal_{subject}",
            "task_alias": subject.replace("_", " "),
            "dataset_name": subject,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject}.yaml"
        # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
        with open(file_save_path, "w") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                # width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )

    if args.task_prefix != "":
        mmlu_subcategories = [
            f"tw_legal_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
        ]
    else:
        mmlu_subcategories = [f"tw_legal_{category}" for category in ALL_CATEGORIES]

    if args.group_prefix != "":
        file_save_path = args.group_prefix + ".yaml"
    else:
        file_save_path = args.save_prefix_path + ".yaml"

    # eval_logger.info(f"Saving benchmark config to {file_save_path}")
    with open(file_save_path, "w") as yaml_file:
        yaml.dump(
            {
                "group": f"tw_legal_{args.task_prefix}"
                if args.task_prefix != ""
                else "tw_legal",
                "task": mmlu_subcategories,
            },
            yaml_file,
            indent=4,
            default_flow_style=False,
        )
group: tw_legal
task:
- tw_legal_law
"dataset_name": "default"
"description": "以下為法律的單選題,請提供正確答案的選項。\n\n"
"group": "tw_legal_law"
"group_alias": "law"
"include": "_default_template_yaml"
"task": "tw_legal_default"
"task_alias": "default"
import datasets


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _helper(doc):
        # modifies the contents of a single
        # document in our dataset.
        answer_list = ["A", "B", "C", "D"]
        choices = [doc["A"], doc["B"], doc["C"], doc["D"]]
        out_doc = {
            "questions": doc["question"],
            "choices": choices,
            "goal": answer_list.index(doc["answer"]),
        }
        return out_doc

    return dataset.map(_helper)  # returns back a datasets.Dataset object
subject name category
default 法律 default
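
subject.tsv drives the initialization loop in the generator: each row's `category` is matched against the `categories` dict, and the `name` column supplies the Chinese label used in the prompt description. A self-contained sketch of that mapping on the single legal row, with the TSV content inlined for illustration:

```python
# Replays the SUBJECTS / subject2name initialization from the generator above,
# using the single row of the tw_legal subject.tsv shown above.
import io

import pandas as pd

categories = {"law": ["default"]}
SUBJECTS, subject2name = {}, {}

tsv = "subject\tname\tcategory\ndefault\t法律\tdefault\n"
df = pd.read_csv(io.StringIO(tsv), delimiter="\t")

for _, row in df.iterrows():
    for _c in categories:
        if row["category"] in categories[_c]:
            SUBJECTS[row["subject"]] = _c
            subject2name[row["subject"]] = row["name"]
            break

print(SUBJECTS)      # {'default': 'law'}
print(subject2name)  # {'default': '法律'}
```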