# InfiniteBench dataset
[InfiniteBench](https://github.com/OpenBMB/InfiniteBench) is composed of 12 major tasks.
## Create Hugging Face dataset
The processed Hugging Face dataset for InfiniteBench can be found [here](https://huggingface.co/datasets/MaxJeblick/InfiniteBench). To reproduce this dataset, simply run the `create_huggingface_dataset.py` script.
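Each task is pushed as its own config with a `test` split, so a single task can be loaded directly (illustrative sketch using the `passkey` config):

```python
from datasets import load_dataset

# Load one InfiniteBench task; each task is a separate config with a "test" split.
dataset = load_dataset("MaxJeblick/InfiniteBench", "passkey", split="test")
print(dataset[0]["question"])
```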
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Adapted from https://github.com/OpenBMB/InfiniteBench/blob/main/src/compute_scores.py
import json
import re
import string
from collections import Counter
from tqdm import tqdm
def calculate_metrics(df):
preds = df["predicted_answer"].tolist()
labels = df["answer"].tolist()
task = df["task"].tolist()[0]
model = "dummy_model" # not really used
return get_score(labels, preds, task, model)
def normalize_answer(s: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
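# Illustrative example (not part of the original script):
# normalize_answer("The Quick, Brown Fox!") -> "quick brown fox"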
def normalize_zh_answer(s: str) -> str:
"""Chinese version. Lower text and remove punctuation, extra whitespace."""
def white_space_fix(text):
return "".join(text.split())
def remove_punc(text):
cn_punctuation = "！？｡。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."  # noqa
all_punctuation = set(string.punctuation + cn_punctuation)
return "".join(ch for ch in text if ch not in all_punctuation)
def lower(text):
return text.lower()
return white_space_fix(remove_punc(lower(s)))
def f1_score(prediction, ground_truth) -> tuple[float, float, float]:
common = Counter(prediction) & Counter(ground_truth)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(prediction)
recall = 1.0 * num_same / len(ground_truth)
f1 = (2 * precision * recall) / (precision + recall)
return f1, precision, recall
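# Illustrative example (not part of the original script): for token lists,
# f1_score(["the", "cat", "sat"], ["the", "cat"]) -> f1=0.8, precision≈0.67, recall=1.0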
def qa_f1_score(pred: str, ground_truths) -> float:
"""Computes the F1, recall, and precision."""
f1 = 0
prec = 0
recall = 0
for ground_truth in ground_truths:
normalized_prediction = normalize_answer(pred)
normalized_ground_truth = normalize_answer(ground_truth)
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
scores = f1_score(prediction_tokens, ground_truth_tokens)
this_f1, this_prec, this_recall = scores
f1 = max(f1, this_f1) # type: ignore[assignment]
prec = max(prec, this_prec) # type: ignore[assignment]
recall = max(recall, this_recall) # type: ignore[assignment]
return f1
def qa_f1_score_zh(pred: str, ground_truths: list[str]) -> float:
"""
QA F1 score for Chinese.
"""
f1 = 0
prec = 0
recall = 0
for ground_truth in ground_truths:
norm_pred = normalize_zh_answer(pred)
norm_label = normalize_zh_answer(ground_truth)
# One character one token.
pred_tokens = list(norm_pred)
label_tokens = list(norm_label)
scores = f1_score(pred_tokens, label_tokens)
this_f1, this_prec, this_recall = scores
f1 = max(f1, this_f1) # type: ignore[assignment]
prec = max(prec, this_prec) # type: ignore[assignment]
recall = max(recall, this_recall) # type: ignore[assignment]
return f1
def load_json(fname):
return json.load(open(fname))
def iter_jsonl(fname, cnt=None):
i = 0
with open(fname, "r", encoding="utf8") as fin:
for line in fin:
if line.strip() == "": # Skip empty lines
continue
if i == cnt:
break
if line.strip() == "": # Skip empty lines
continue
yield json.loads(line)
i += 1
def first_int_match(prediction):
pred_list = re.split("[^0-9]", prediction)
pred_value = ""
for item in pred_list:
if item != "":
pred_value = item
break
return pred_value
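# Illustrative example (not part of the original script):
# first_int_match("The pass key is 71432.") -> "71432"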
def split_retrieval_answer(pred: str):
for c in ["\n", ":", '"', "'", ".", ",", "?", "!", "{", "}"]:
pred = pred.replace(c, " ")
words = pred.split()
return words
def get_score_one_kv_retrieval(pred, label, model_name: str) -> bool:
if isinstance(label, list):
label = label[0]
for c in ["\n", ":", '"', "'", ".", ",", "?", "!", "{", "}"]:
pred = pred.replace(c, " ")
words = pred.split()
return label in words
def get_score_one_passkey(pred, label, model_name: str) -> bool:
if isinstance(label, list):
label = label[0]
return label == first_int_match(pred)
def get_score_one_number_string(pred, label, model_name: str) -> bool:
if isinstance(label, list):
label = label[0]
return label == first_int_match(pred)
def get_score_one_code_run(pred, label, model_name: str) -> bool:
"""
Returns the score of one example in Code.Run.
"""
if isinstance(label, list):
label = label[0]
pred = pred.strip()
for c in ["\n", ".", "`", "'", '"', ":"]:
pred = pred.replace(c, " ")
words = pred.split()
if len(words) == 0:
return False
try:
pred = int(words[-1])
return label == pred
except Exception:
return False
def get_score_one_code_debug(pred, label, model_name: str) -> bool:
"""
Returns the score of one example in Code.Debug.
"""
pred = pred.strip()
label_c = label[1]
fn_name = label[0]
pattern = r"\b[A-J]\b(?!.*\b[A-J]\b)"
match = re.search(pattern, pred)
if match:
extracted_pred = match.group(0)
if extracted_pred == label_c:
return True
ans_prefixes = [
"answer is:",
# "answer is",
# "error is",
"is:",
"answer:",
"correct option is:",
]
pred = pred.strip()
for c in ["\n", "`", "'", '"', "-", "*", "Option", "option"]:
pred = pred.replace(c, " ")
while " " in pred:
pred = pred.replace(" ", " ")
if pred.startswith(label_c) or pred.startswith(fn_name):
return True
for prefix in ans_prefixes:
idx = pred.find(prefix)
if idx == -1:
continue
# The prediction ends with this prefix
if len(pred) < idx + len(prefix) + 1:
return False
pred = pred[idx + len(prefix) + 1 :]
for s in [label_c, fn_name]:
if pred.startswith(s):
return True
return False
return False
def get_score_one_math_find(pred, label, model_name: str) -> bool:
if isinstance(label, list):
# In math_find, there is always only one label.
label = label[0]
if isinstance(label, int):
# Find first int or float
first_num = re.search(r"\d+\.\d+|\d+", pred)
if first_num is None:
return False
first_num = first_num.group(0).strip()
return int(first_num) == label
elif isinstance(label, float):
# Find first float or int
first_float = re.search(r"\d+\.\d+|\d+", pred)
if first_float is None:
return False
first_float = first_float.group(0).strip()
return float(first_float) == label
else:
raise TypeError(f"Expected int or float, got {type(label)}")
def get_score_one_longdialogue_qa_eng(pred, label, model_name: str) -> int:
pred = pred.strip()
pred = pred.upper()
for item in label:
if item.upper() in pred:
return 1
return 0
def get_score_one_longbook_choice_eng(pred, label, model_name: str) -> bool:
# Just use the first letter as the prediction
pred = pred.strip()
pattern = r"\b[A-D]\b(?!.*\b[A-D]\b)"
match = re.search(pattern, pred)
if match:
extracted_pred = match.group(0)
if extracted_pred in label:
return True
if pred == "":
return False
if pred[0] in "ABCD":
return pred[0] in label
if pred in label:
return True
# Find an answer prefix
for c in ["\n", '"', "'", ".", ",", "?", "!", "{", "}"]:
pred = pred.replace(c, " ")
while " " in pred:
pred = pred.replace(" ", " ")
ans_prefixes = [
"answer is:",
"answer:",
"answer is",
"option is",
]
for prefix in ans_prefixes:
idx = pred.find(prefix)
if idx == -1:
continue
# The prediction ends with this prefix
if len(pred) < idx + len(prefix) + 1:
return False
after_prefix = pred[idx + len(prefix) + 1 :]
for s in label:
if after_prefix.startswith(s):
return True
return False
# Finally, just find the first occurrence of A, B, C, or D.
words = pred.split()
for word in words:
if word in "ABCD":
return word in label
return False
def get_score_one_longbook_qa_eng(pred, label, model_name: str) -> float:
return qa_f1_score(pred, label)
def get_score_one_longbook_qa_chn(pred, label, model_name: str) -> float:
return qa_f1_score_zh(pred, label)
def get_score_one_math_calc(pred, label, model_name: str) -> float:
assert isinstance(label, list), f"Expected list, got {type(label)}"
# assert isinstance(pred, list), f"Expected list, got {type(pred)}"
if isinstance(label[0], list):
label = label[0]
pred_nums = []
pred_list = re.split("[^0-9]", pred)
for item in pred_list:
if item != "":
pred_nums.append(int(item))
# Our prompts make GPT-4 always output the first number as the first value
# in the predicted answer.
if model_name == "gpt4":
pred_nums = pred_nums[1:]
cnt = 0
for i in range(len(label)):
if i >= len(pred_nums):
break
if label[i] == pred_nums[i]:
cnt += 1
else:
break
return cnt / len(label)
def get_score_one(pred: str, label: str, task_name: str, model_name: str) -> float:
"""
Computes the score for one prediction.
Returns one float (zero or one for boolean values).
"""
NAME_TO_SCORE_GETTER = {
# Retrieve
"kv_retrieval": get_score_one_kv_retrieval,
"kv_retrieval_prefix": get_score_one_kv_retrieval,
"kv_retrieval_both": get_score_one_kv_retrieval,
"passkey": get_score_one_passkey,
"number_string": get_score_one_number_string,
# Code
"code_run": get_score_one_code_run,
"code_debug": get_score_one_code_debug,
# Longbook
"longdialogue_qa_eng": get_score_one_longdialogue_qa_eng,
"longbook_qa_eng": get_score_one_longbook_qa_eng,
"longbook_choice_eng": get_score_one_longbook_choice_eng,
"longbook_qa_chn": get_score_one_longbook_qa_chn,
# Math
"math_find": get_score_one_math_find,
"math_calc": get_score_one_math_calc,
}
if task_name == "longbook_sum_eng":
raise AssertionError("Longbook sum task is not supported due to a conflict with rouge library. ")
assert task_name in NAME_TO_SCORE_GETTER, f"Invalid task name: {task_name}"
score = NAME_TO_SCORE_GETTER[task_name](pred, label, model_name)
return float(score)
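# Illustrative example (not part of the original script):
# get_score_one("The pass key is 71432", ["71432"], "passkey", "dummy_model") -> 1.0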
def get_labels(preds: list) -> list[str]:
possible_label_keys = ["ground_truth", "label"]
for label_key in possible_label_keys:
if label_key in preds[0]:
return [x.get(label_key, "XXXXXXXXXX") for x in preds]
raise ValueError(f"Cannot find label in {preds[0]}")
def get_preds(preds: list, data_name: str) -> list[str]:
pred_strings = []
possible_pred_keys = ["prediction", "pred"]
for pred in preds:
this_pred = "NO PREDICTION"
for pred_key in possible_pred_keys:
if pred_key in pred:
this_pred = pred[pred_key]
break
else:
raise ValueError(f"Cannot find prediction in {pred}")
pred_strings.append(this_pred)
return pred_strings
def get_score(labels: list, preds: list, data_name: str, model_name: str) -> float:
"""
Computes the average score for a task.
"""
assert len(labels) == len(preds)
scores = []
for label, pred in tqdm(zip(labels, preds)):
score = get_score_one(pred, label, data_name, model_name)
scores.append(score)
return sum(scores) / len(scores)
def compute_scores(preds_path, data_name: str, model_name: str):
print("Loading prediction results from", preds_path)
preds = list(iter_jsonl(preds_path))
labels = get_labels(preds)
preds = get_preds(preds, data_name)
acc = get_score(labels, preds, data_name, model_name)
print(acc)
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import re
from datasets import Dataset, Features, Sequence, Value, load_dataset
"""
| Task Name | Context | # Examples | Avg Input Tokens | Avg Output Tokens | Description |
| -------------------- | ------------- | ---------- | ---------------- | ----------------- | ------------------------------------------------------------------------------------------- |
| passkey | Synthetic | 590 | 122.4k | 2.0 | Retrieving hidden keys in a noisy long context. |
| kv_retrieval | Synthetic | 500 | 89.9k | 22.7 | Finding the corresponding value from a dictionary and a key. |
| number_string | Synthetic | 590 | 122.4k | 4.0 | Locating repeated hidden numbers in a noisy long context. |
| code_run | Synthetic | 400 | 75.2k | 1.3 | Simulating execution of multiple simple, synthetic functions. |
| code_debug           | Code Document | 394        | 114.7k           | 4.8               | Finding which function in a code repo contains a crashing error (in multiple choice form).   |
| math_find | Synthetic | 350 | 87.9k | 1.3 | Finding special integers in a lengthy list. |
| longbook_qa_eng | Fake Book | 351 | 192.6k | 4.8 | Free-form question answering based on the fake book. |
| longdialogue_qa_eng | Script | 200 | 103.6k | 3.4 | Identification of talkers in partially anonymized scripts. |
| longbook_choice_eng | Fake Book | 229 | 184.4k | 5.3 | Multiple choice questions derived from the fake book. |
"""
"""
Examples:
passkey:
context: "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there. The pass key is 71432. Remember it. 71432 is the pass key. The grass is green. The sky is blue."
question: "What is the pass key?"
answer: ["71432"]
kv_retrieval:
context: "Extract the value corresponding to the specified key in the JSON object below. JSON data: {"e6aa4656-0eb5-4e1d-ad33-1ded282e0a78" ..."
question: "Key: "ce06788c-71b4-4f7a-b196-0fd9965a59c5" The value associated with the specified key is:"
answer: ["00a00042-6bcb-494f-9c35-57180f1e7251"]
number_string:
context: "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there. The sequence of digits is 2200012222. Remember it. 2200012222 is the sequence of digits. The grass is green. The sky is blue."
question: "What is the sequence number?"
answer: ["2200012222"]
longdialogue_qa_eng:
context: "Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is. The dialogue: --- BEASTS OF THE SOUTHERN WILD Written by Lucy Alibar & Benh Zeitlin FINAL DRAFT: Based on the stage play "Juicy and Delicious"
question: "Which character is $$MASK$$ ?"
answer: [ "ACE", "ACE ROTHSTEIN" ]
longbook_qa_eng:
context: "Read the book below and answer a question. ‘Yes, of course, if it’s fine to-morrow,’ said Mrs Bronwyn. ‘But you’ll have to be up with the lark,’ she added. "
question: "Which among Annalisa, Seb, Peyton, and Gannonmarie is not Mrs. Bronwyn's child?"
answer: [ "\"Peyton\"" ]
longbook_choice_eng:
context: "Read the book and answer the question. With a single drop of ink for a mirror, the Egyptian sorcerer undertakes to reveal to any chance comer far-reaching visions of the past. This is what I undertake to do for you, reader. With this drop of ink at the end of my pen, I will show you the roomy workshop "
question: "Which of the following is NOT one of Alain's chores at Hall Farm? Only one of the following options is correct, tell me the answer using one single letter (A, B, C, or D). Don't say anything else. A. Walking Georgie B. Taking care of Totty C. Working in the dairy D. Light housework"
answer: "["A"]"
"""
ft = Features(
{
"id": Value("int64"),
"context": Value("string"),
"input": Value("string"),
"answer": Sequence(Value("string")),
"options": Sequence(Value("string")),
}
)
# yarn_mistral_templates from: https://github.com/OpenBMB/InfiniteBench/blob/main/src/prompt.py
context_prefix = {
"passkey": "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.\n\n{context}\n\n",
"kv_retrieval": "Extract the value corresponding to the specified key in the JSON object below.\n\n{context}\n\n",
"number_string": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n\n",
"longdialogue_qa_eng": 'Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is.\n\n{context}\n\n',
"longbook_qa_eng": "Read the book below and answer a question. Be very concise in your answer.\n\n{context}\n\n",
"longbook_qa_chn": "请根据以下书籍回答我的问题。\n\n{context}\n\n",
"longbook_choice_eng": "Read the book and answer the question.\n\n{context}\n\n",
"math_calc": "Let us calculate the intermediate values of an expression.\n\nExpression: 1 + 3 + 4\nValues: [1, 4, 8]\n\nExpression: 8 - 3 + 2 - 4\nValues: [8, 5, 7, 3]\n\nExpression: {context}\n\n",
"code_debug": "Following is a Python code where exactly one of the functions/methods has a deliberate error that makes it crash.\n\n{context}\n\n",
"longbook_sum_eng": "Summarize the book below:\n\n{context}\n\n",
"math_find": "{prefix}\n\n{context}\n\n{input}\n\n",
"code_run": "There is a function called {func} in the following Python code.\n\n{context}\n\n",
}
question_template = {
"longbook_choice_eng": "\n\nOnly one of the following options is correct, tell me the answer using one single letter (A, B, C, or D). Don't say anything else.\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}",
"code_debug": "\n\nOptions:\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}",
}
answer_prefix = {
"kv_retrieval": "The value associated with the specified key is",
"passkey": "The pass key is:",
"number_string": "The sequence of digits is",
"longbook_sum_eng": "Summary:",
"longbook_choice_eng": "The letter of the correct answer is",
"longbook_qa_eng": "Answer:",
"longbook_qa_chn": "答案:",
"math_calc": "Values:",
"code_run": "The return value is:",
"code_debug": "The correct option is:",
"longdialogue_qa_eng": "The name that has been replaced with $$MASK$$ is likely",
}
DATA_NAME_TO_MAX_NEW_TOKENS = {
"passkey": 6,
"number_string": 12,
"kv_retrieval": 50,
"longbook_sum_eng": 1200,
"longbook_choice_eng": 40,
"longbook_qa_eng": 40,
"longbook_qa_chn": 40,
"longdialogue_qa_eng": 40,
"math_find": 3,
"math_calc": 30000,
"code_run": 5,
"code_debug": 5,
}
for task in [
"passkey",
"kv_retrieval",
"number_string",
"longdialogue_qa_eng",
"longbook_qa_eng",
"longbook_choice_eng",
"code_run",
"code_debug",
"math_find",
"math_calc",
"longbook_sum_eng",
"longbook_qa_chn",
]:
dataset = load_dataset("xinrongzhang2022/InfiniteBench", features=ft)
df = dataset[task].to_pandas()
assert (df.columns == ["id", "context", "input", "answer", "options"]).all()
if task not in ["longbook_choice_eng", "code_debug"]:
# Only longbook_choice_eng and code_debug have non-empty options column
assert (df["options"].apply(len) == 0).all()
df = df.rename(columns={"input": "question"})
df["answer_prefix"] = answer_prefix.get(task, "")
if task == "math_find":
# https://github.com/OpenBMB/InfiniteBench/blob/main/src/eval_utils.py#L328C1-L340C1
def update_math_find_context(row):
prompt = row["question"]
context = row["context"]
find_result = re.findall(r"The .+ of", prompt)
if not find_result:
raise AssertionError()
target_number = find_result[0].lower()[:-3]
prefix = f"What is {target_number} in the following list?"
return context_prefix["math_find"].format(
prefix=prefix,
context=context,
input=prompt,
)
df["context"] = df.apply(update_math_find_context, axis=1)
df["context"] = df["context"].apply(
lambda x: x.replace(
"You should answer with only one number, no other words. The largest number of the list is:", ""
)
)
elif task == "code_run":
# https://github.com/OpenBMB/InfiniteBench/blob/main/src/eval_utils.py#L272
def update_context(row):
find_result = re.findall(r"func_[0-9]+\(\-?[0-9]+\)", row["question"])
if not find_result:
raise AssertionError()
func_call = find_result[0]
func = func_call.split("(")[0]
return context_prefix["code_run"].format(
func=func,
context=row["context"],
)
df["context"] = df.apply(update_context, axis=1)
else:
df["context"] = df["context"].apply(lambda x: context_prefix[task].format(context=x))
if task in ["longbook_choice_eng", "code_debug"]:
df["question"] = df["question"] + df["options"].apply(
lambda x: question_template[task].format(OPTION_A=x[0], OPTION_B=x[1], OPTION_C=x[2], OPTION_D=x[3])
)
df["answer"] = df.apply(lambda row: ["ABCD"[list(row.options).index(row.answer)]], axis=1)
if task == "kv_retrieval":
# moved to answer prefix
df["question"] = df["question"].apply(
lambda x: x.replace("The value associated with the specified key is:", "")
)
df = df[["context", "question", "answer_prefix", "answer"]]
df["task"] = task
# be a bit more generous with token generation to avoid any cut-offs
df["max_new_tokens"] = DATA_NAME_TO_MAX_NEW_TOKENS[task] + 20
# Push to hub
dataset = Dataset.from_pandas(df)
dataset.push_to_hub("MaxJeblick/InfiniteBench", config_name=task, split="test")
# LongBench dataset
[LongBench](https://github.com/THUDM/LongBench/tree/main/LongBench) is a bilingual, multitask benchmark for long-context understanding.
## Create Hugging Face dataset
The processed Hugging Face dataset for LongBench can be found [here](https://huggingface.co/datasets/Xnhyacinth/LongBench). To reproduce this dataset, simply run the `create_huggingface_dataset.py` script.
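Each task is pushed as its own config with a `test` split (plus `<task>_e` configs for the LongBench-E subsets), so a single task can be loaded directly (illustrative sketch using the `narrativeqa` config):

```python
from datasets import load_dataset

# Load one LongBench task; each task is a separate config with a "test" split.
dataset = load_dataset("Xnhyacinth/LongBench", "narrativeqa", split="test")
print(dataset[0]["answer_prefix"])
```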
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import re
import string
from collections import Counter
import jieba
import numpy as np
from fuzzywuzzy import fuzz
from rouge import Rouge
def calculate_metrics(df):
predictions = df["predicted_answer"].tolist()
answers = df["answers"].tolist()
dataset = df["task"].tolist()[0]
all_classes = df["all_classes"].tolist()[0]
return scorer(dataset, predictions, answers, all_classes)
def calculate_metrics_e(df):
predictions = df["predicted_answer"].tolist()
answers = df["answers"].tolist()
dataset = df["task"].tolist()[0].removesuffix("-e")
all_classes = df["all_classes"].tolist()[0]
lengths = df["length"].tolist()
return scorer_e(dataset, predictions, answers, lengths, all_classes)
def scorer_e(dataset, predictions, answers, lengths, all_classes):
scores = {"0-4k": [], "4-8k": [], "8k+": []} # type:ignore[var-annotated]
for prediction, ground_truths, length in zip(predictions, answers, lengths):
score = 0.0
if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
prediction = prediction.lstrip("\n").split("\n")[0]
for ground_truth in ground_truths:
score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes))
if length < 4000:
scores["0-4k"].append(score)
elif length < 8000:
scores["4-8k"].append(score)
else:
scores["8k+"].append(score)
for key in scores.keys():
scores[key] = round(100 * np.mean(scores[key]), 2)
return scores
def scorer(dataset, predictions, answers, all_classes):
total_score = 0.0
for prediction, ground_truths in zip(predictions, answers):
score = 0.0
if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
prediction = prediction.lstrip().split("\n")[0]
for ground_truth in ground_truths:
score = max(score, dataset2metric[dataset](prediction.lstrip(), ground_truth, all_classes=all_classes))
total_score += score
return round(100 * total_score / len(predictions), 2)
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def normalize_zh_answer(s):
"""Lower text and remove punctuation, extra whitespace."""
def white_space_fix(text):
return "".join(text.split())
def remove_punc(text):
cn_punctuation = "！？｡。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
all_punctuation = set(string.punctuation + cn_punctuation)
return "".join(ch for ch in text if ch not in all_punctuation)
def lower(text):
return text.lower()
return white_space_fix(remove_punc(lower(s)))
def count_score(prediction, ground_truth, **kwargs):
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(ground_truth):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def retrieval_score(prediction, ground_truth, **kwargs):
pattern = r"Paragraph (\d+)"
matches = re.findall(pattern, ground_truth)
ground_truth_id = matches[0]
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(ground_truth_id):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def retrieval_zh_score(prediction, ground_truth, **kwargs):
pattern = r"段落(\d+)"
matches = re.findall(pattern, ground_truth)
ground_truth_id = matches[0]
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(ground_truth_id):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def code_sim_score(prediction, ground_truth, **kwargs):
all_lines = prediction.lstrip("\n").split("\n")
prediction = ""
for line in all_lines:
if ("`" not in line) and ("#" not in line) and ("//" not in line):
prediction = line
break
return fuzz.ratio(prediction, ground_truth) / 100
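# Illustrative example (not part of the original script): the first line that is not a
# comment or code fence is fuzzy-matched against the ground truth, e.g.
# code_sim_score("# comment\nreturn x + 1", "return x + 1") -> 1.0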
def classification_score(prediction, ground_truth, **kwargs):
em_match_list = []
all_classes = kwargs["all_classes"]
for class_name in all_classes:
if class_name in prediction:
em_match_list.append(class_name)
for match_term in em_match_list:
if match_term in ground_truth and match_term != ground_truth:
em_match_list.remove(match_term)
if ground_truth in em_match_list:
score = 1.0 / len(em_match_list)
else:
score = 0.0
return score
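# Illustrative example (not part of the original script):
# classification_score("LOC or NUM", "LOC", all_classes=["LOC", "NUM", "HUM"]) -> 0.5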
def rouge_score(prediction, ground_truth, **kwargs):
rouge = Rouge()
try:
scores = rouge.get_scores([prediction], [ground_truth], avg=True)
except Exception as e:
print(f"An error occurred: {e}")
return 0.0
return scores["rouge-l"]["f"]
def rouge_zh_score(prediction, ground_truth, **kwargs):
prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))
ground_truth = " ".join(list(jieba.cut(ground_truth, cut_all=False)))
score = rouge_score(prediction, ground_truth)
return score
def f1_score(prediction, ground_truth, **kwargs):
common = Counter(prediction) & Counter(ground_truth)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction)
recall = 1.0 * num_same / len(ground_truth)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def qa_f1_score(prediction, ground_truth, **kwargs):
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(ground_truth)
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
return f1_score(prediction_tokens, ground_truth_tokens)
def qa_f1_zh_score(prediction, ground_truth, **kwargs):
prediction_tokens = list(jieba.cut(prediction, cut_all=False))
ground_truth_tokens = list(jieba.cut(ground_truth, cut_all=False))
prediction_tokens = [normalize_zh_answer(token) for token in prediction_tokens]
ground_truth_tokens = [normalize_zh_answer(token) for token in ground_truth_tokens]
prediction_tokens = [token for token in prediction_tokens if len(token) > 0]
ground_truth_tokens = [token for token in ground_truth_tokens if len(token) > 0]
return f1_score(prediction_tokens, ground_truth_tokens)
dataset2metric = {
"narrativeqa": qa_f1_score,
"qasper": qa_f1_score,
"multifieldqa_en": qa_f1_score,
"multifieldqa_zh": qa_f1_zh_score,
"hotpotqa": qa_f1_score,
"2wikimqa": qa_f1_score,
"musique": qa_f1_score,
"dureader": rouge_zh_score,
"gov_report": rouge_score,
"qmsum": rouge_score,
"multi_news": rouge_score,
"vcsum": rouge_zh_score,
"trec": classification_score,
"triviaqa": qa_f1_score,
"samsum": rouge_score,
"lsht": classification_score,
"passage_retrieval_en": retrieval_score,
"passage_count": count_score,
"passage_retrieval_zh": retrieval_zh_score,
"lcc": code_sim_score,
"repobench-p": code_sim_score,
}
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from datasets import Dataset, load_dataset
# yarn_mistral_templates from: https://github.com/THUDM/LongBench/blob/main/LongBench/pred.py
context_prefix = {
"narrativeqa": "You are given a story, which can be either a novel or a movie script, and a question. Answer the question asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\n",
"qasper": 'You are given a scientific article and a question. Answer the question as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write "unanswerable". If the question is a yes/no question, answer "yes", "no", or "unanswerable". Do not provide any explanation.\n\nArticle: {context}\n\n Answer the question based on the above article as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write "unanswerable". If the question is a yes/no question, answer "yes", "no", or "unanswerable". Do not provide any explanation.\n\n',
"multifieldqa_en": "Read the following text and answer briefly.\n\n{context}\n\nNow, answer the following question based on the above text, only give me the answer and do not output any other words.\n\n",
"multifieldqa_zh": "阅读以下文字并用中文简短回答:\n\n{context}\n\n现在请基于上面的文章回答下面的问题,只告诉我答案,不要输出任何其他字词。\n\n",
"hotpotqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\n",
"2wikimqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\n",
"musique": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\n",
"dureader": "请基于给定的文章回答下述问题。\n\n文章:{context}\n\n请基于上述文章回答下面的问题。\n\n",
"gov_report": "You are given a report by a government agency. Write a one-page summary of the report.\n\nReport:\n{context}\n\n",
"qmsum": "You are given a meeting transcript and a query containing a question or instruction. Answer the query in one or more sentences.\n\nTranscript:\n{context}\n\nNow, answer the query based on the above meeting transcript in one or more sentences.\n\n",
"multi_news": "You are given several news passages. Write a one-page summary of all news. \n\nNews:\n{context}\n\n",
"vcsum": "下面有一段会议记录,请你阅读后,写一段总结,总结会议的内容。\n会议记录:\n{context}\n\n",
"trec": "Please determine the type of the question below. Here are some examples of questions.\n\n{context}\n",
"triviaqa": "Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n",
"samsum": "Summarize the dialogue into a few short sentences. The following are some examples.\n\n{context}\n\n",
"lsht": "请判断给定新闻的类别,下面是一些例子。\n\n{context}\n",
"passage_count": "There are some paragraphs below sourced from Wikipedia. Some of them may be duplicates. Please carefully read these paragraphs and determine how many unique paragraphs there are after removing duplicates. In other words, how many non-repeating paragraphs are there in total?\n\n{context}\n\n",
"passage_retrieval_en": "Here are 30 paragraphs from Wikipedia, along with an abstract. Please determine which paragraph the abstract is from.\n\n{context}\n\nThe following is an abstract.\n\n",
"passage_retrieval_zh": "以下是若干段落文字,以及其中一个段落的摘要。请确定给定的摘要出自哪一段。\n\n{context}\n\n下面是一个摘要\n\n",
"lcc": "Please complete the code given below. \n{context}",
"repobench-p": "Please complete the code given below. \n{context}",
}
question_template = {
"narrativeqa": "Question: {input}\n\n",
"qasper": "Question: {input}\n\n",
"multifieldqa_en": "Question: {input}\n",
"multifieldqa_zh": "问题:{input}\n",
"hotpotqa": "Question: {input}\n",
"2wikimqa": "Question: {input}\n",
"musique": "Question: {input}\n",
"dureader": "问题:{input}\n",
"gov_report": "Now, write a one-page summary of the report.\n\n",
"qmsum": "Query: {input}\n",
"multi_news": "Now, write a one-page summary of all the news.\n\n",
"vcsum": "",
"trec": "{input}",
"triviaqa": "{input}",
"samsum": "{input}",
"lsht": "{input}",
"passage_count": "Please enter the final count of unique paragraphs after removing duplicates. The output format should only contain the number, such as 1, 2, 3, and so on.\n\n",
"passage_retrieval_en": '{input}\n\nPlease enter the number of the paragraph that the abstract is from. The answer format must be like "Paragraph 1", "Paragraph 2", etc.\n\n',
"passage_retrieval_zh": '{input}\n\n请输入摘要所属段落的编号。答案格式必须是"段落1","段落2"等格式\n\n',
"lcc": "{input}",
"repobench-p": "{input}",
}
answer_prefix = {
"narrativeqa": "Answer:",
"qasper": "Answer:",
"multifieldqa_en": "Answer:",
"multifieldqa_zh": "回答:",
"hotpotqa": "Answer:",
"2wikimqa": "Answer:",
"musique": "Answer:",
"dureader": "回答:",
"gov_report": "Summary:",
"qmsum": "Answer:",
"trec": "Type:",
"multi_news": "Summary:",
"samsum": "Summary:",
"triviaqa": "Answer:",
"vcsum": "会议总结:",
"passage_count": "The final answer is: ",
"passage_retrieval_en": "The answer is: ",
"passage_retrieval_zh": "答案是:",
"lcc": "Next line of code:\n",
"repobench-p": "Next line of code:\n",
}
DATA_NAME_TO_MAX_NEW_TOKENS = {
"narrativeqa": 128,
"qasper": 128,
"multifieldqa_en": 64,
"multifieldqa_zh": 64,
"hotpotqa": 32,
"2wikimqa": 32,
"musique": 32,
"dureader": 128,
"gov_report": 512,
"qmsum": 512,
"multi_news": 512,
"vcsum": 512,
"trec": 64,
"triviaqa": 32,
"samsum": 128,
"lsht": 64,
"passage_count": 32,
"passage_retrieval_en": 32,
"passage_retrieval_zh": 32,
"lcc": 64,
"repobench-p": 64,
}
# Longbench
for task in [
"narrativeqa",
"qasper",
"multifieldqa_en",
"hotpotqa",
"2wikimqa",
"musique",
"gov_report",
"qmsum",
"multi_news",
"trec",
"triviaqa",
"samsum",
"passage_count",
"passage_retrieval_en",
"lcc",
"repobench-p",
]:
dataset = load_dataset("THUDM/LongBench", task, split="test")
dataset = dataset.map(lambda x: {"context": context_prefix[task].format(**x)})
if task == "trec":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Type:"))}
)
elif task == "triviaqa":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Answer:"))}
)
elif task == "samsum":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Summary:"))}
)
else:
dataset = dataset.map(lambda x: {"input": question_template[task].format(**x)})
df = dataset.to_pandas()
df = df.rename(columns={"input": "question"})
df["answer_prefix"] = answer_prefix.get(task, "")
# df = df[["context", "question", "answer_prefix", "answers", "all_classes"]]
df["task"] = task
# be a bit more generous with token generation to avoid any cut-offs
df["max_new_tokens"] = DATA_NAME_TO_MAX_NEW_TOKENS[task] + 20
# Push to hub
dataset = Dataset.from_pandas(df)
dataset.push_to_hub("Xnhyacinth/LongBench", config_name=task, split="test")
# Longbench-e
for task in [
"qasper",
"multifieldqa_en",
"hotpotqa",
"2wikimqa",
"gov_report",
"multi_news",
"trec",
"triviaqa",
"samsum",
"passage_count",
"passage_retrieval_en",
"lcc",
"repobench-p",
]:
dataset = load_dataset("THUDM/LongBench", f"{task}_e", split="test")
dataset = dataset.map(lambda x: {"context": context_prefix[task].format(**x)})
if task == "trec":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Type:"))}
)
elif task == "triviaqa":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Answer:"))}
)
elif task == "samsum":
dataset = dataset.map(
lambda x: {"input": question_template[task].format(input=x["input"].removesuffix("Summary:"))}
)
else:
dataset = dataset.map(lambda x: {"input": question_template[task].format(**x)})
df = dataset.to_pandas()
df = df.rename(columns={"input": "question"})
df["answer_prefix"] = answer_prefix.get(task, "")
# df = df[["context", "question", "answer_prefix", "answers", "all_classes"]]
df["task"] = task
# be a bit more generous with token generation to avoid any cut-offs
df["max_new_tokens"] = DATA_NAME_TO_MAX_NEW_TOKENS[task] + 20
# Push to hub
dataset = Dataset.from_pandas(df)
dataset.push_to_hub("Xnhyacinth/LongBench", config_name=f"{task}_e", split="test")
# LongBench-v2 dataset
[LongBench-v2](https://github.com/THUDM/LongBench) consists of multiple-choice questions that test deep understanding and reasoning over long contexts.
## Create Hugging Face dataset
The processed Hugging Face dataset for LongBench-v2 can be found [here](https://huggingface.co/datasets/simonjegou/LongBench-v2). To reproduce this dataset, simply run the `create_huggingface_dataset.py` script.
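The dataset is pushed as a single `0shot` config with a `test` split (illustrative sketch):

```python
from datasets import load_dataset

# Load the processed LongBench-v2 dataset (single "0shot" config, "test" split).
dataset = load_dataset("simonjegou/LongBench-v2", "0shot", split="test")
print(dataset[0]["question"][:200])
```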
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
def score(predicted_answer, expected_answer):
# From https://github.com/THUDM/LongBench/blob/main/pred.py (extract_answer function)
predicted_answer = predicted_answer.replace("*", "")
r1 = f"The correct answer is ({expected_answer})" in predicted_answer
r2 = f"The correct answer is {expected_answer}" in predicted_answer
return r1 or r2
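# Illustrative example (not part of the original script):
# score("The correct answer is (B)", "B") -> True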
def calculate_metrics(df):
df["score"] = df.apply(lambda row: score(row["predicted_answer"], row["answer"]), axis=1)
metrics = {"average": df["score"].mean()}
metrics.update(df.groupby("difficulty")["score"].mean())
metrics.update(df.groupby("length")["score"].mean())
return metrics
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from datasets import Dataset, load_dataset
# Templates from https://github.com/THUDM/LongBench/blob/main/prompts/0shot.txt
context_template = """Please read the following text and answer the question below.
<text>
{context}
</text>
"""
question_template = """What is the correct answer to this question: {question}
Choices:
(A) {A}
(B) {B}
(C) {C}
(D) {D}
Format your response as follows: "The correct answer is (insert answer here)."""
# Longbench-v2
df = load_dataset("THUDM/LongBench-v2", split="train").to_pandas()
df["context"] = df["context"].apply(lambda x: context_template.format(context=x))
df["question"] = df.apply(
lambda row: question_template.format(
question=row["question"],
A=row["choice_A"],
B=row["choice_B"],
C=row["choice_C"],
D=row["choice_D"],
),
axis=1,
)
df["max_new_tokens"] = 16
df["answer_prefix"] = ""
Dataset.from_pandas(df).push_to_hub("simonjegou/LongBench-v2", config_name="0shot", split="test")
# LooGLE dataset
[LooGLE](https://github.com/bigai-nlco/LooGLE/tree/main) is composed of 7 major tasks that evaluate LLMs' ability to understand both short- and long-dependency content.
## Create Hugging Face dataset
The Hugging Face dataset for LooGLE can be found [here](https://huggingface.co/datasets/simonjegou/loogle). To reproduce this dataset, simply run the `create_huggingface_dataset.py` script.
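Each task is pushed as its own config with a `test` split (illustrative sketch using the `shortdep_qa` config):

```python
from datasets import load_dataset

# Load one LooGLE task; each task is a separate config with a "test" split.
dataset = load_dataset("simonjegou/loogle", "shortdep_qa", split="test")
print(dataset[0]["answer_prefix"])
```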
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import nltk
import pandas as pd
from bert_score import score
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.meteor_score import single_meteor_score
from rouge import Rouge
# Code below is adapted from https://github.com/bigai-nlco/LooGLE/blob/main/Evaluation/automatic_metrics.py
def get_bleu_score(reference, hypothesis):
reference, hypothesis = (
reference.replace("\n", " ").split(),
hypothesis.replace("\n", " ").split(),
)
bleu1 = sentence_bleu([reference], hypothesis, weights=(1, 0, 0, 0))
bleu4 = sentence_bleu([reference], hypothesis, weights=(0, 0, 0, 1))
return {"bleu1": bleu1, "bleu4": bleu4}
def get_rouge_score(reference, hypothesis, metric="r"):
rouge = Rouge()
rouge_ = rouge.get_scores(hyps=[hypothesis], refs=[reference])[0]
return dict((key, rouge_[key][metric]) for key in ["rouge-1", "rouge-2", "rouge-l"])
def get_meteor_score(reference, hypothesis):
reference, hypothesis = (
reference.replace("\n", " ").split(),
hypothesis.replace("\n", " ").split(),
)
meteor = single_meteor_score(set(reference), set(hypothesis))
return {"meteor": float(meteor)}
def get_exact_match(reference, hypothesis):
try:
reference = eval(reference)
count = len(reference)
hypothesis = eval(hypothesis)
assert isinstance(hypothesis, dict)
except Exception:
return 0, 1
exact_score_count = 0
for key, value in reference.items():
if hypothesis.get(key) == value:
exact_score_count += 1
return exact_score_count, count
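# Illustrative example (not part of the original script): for the shortdep_cloze task,
# get_exact_match('{"<mask-0>": "Bob"}', '{"<mask-0>": "Bob"}') -> (1, 1)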
def get_partial_match(reference, hypothesis):
reference = eval(reference)
count = len(reference)
try:
hypothesis = eval(hypothesis)
assert isinstance(hypothesis, dict)
partial_score_count = 0
for key in reference:
if key in hypothesis:
true_set = set(reference[key].split())
pred_set = set(hypothesis[key].split())
if len(true_set.intersection(pred_set)) > 0:
partial_score_count += 1
return partial_score_count, count
except Exception:
return 0, count
def try_except_metric(metric_fn):
def wrapped_metric(answer, predicted_answer):
try:
return metric_fn(answer, predicted_answer)
except Exception as e:
print(f"Cannot calculate metric: {e}" f" on answer:{answer} and predicted_answer:{predicted_answer}")
return {key: 0.0 for key in metric_fn("Hi there", "hi there")}
return wrapped_metric
def calculate_metrics(df: pd.DataFrame) -> dict:
nltk.download("wordnet")
scores: dict = {}
for task, df_task in df.groupby("task"):
scores[task] = {}
if task == "shortdep_cloze":
for prefix, metric_fn in [
("exact", get_exact_match),
("partial", get_partial_match),
]:
match, count = zip(*df_task.apply(lambda x: metric_fn(x["answer"], x["predicted_answer"]), axis=1))
scores[task][f"{prefix}_match"] = round(sum(match) / sum(count), 4)
else:
df["predicted_answer"] = df["predicted_answer"].apply(lambda x: x if isinstance(x, str) else "<NONE>")
for metric_fn in [get_bleu_score, get_rouge_score, get_meteor_score]: # type: ignore
metric_fn = try_except_metric(metric_fn)
metric_scores = [metric_fn(row["answer"], row["predicted_answer"]) for _, row in df_task.iterrows()]
scores[task].update(pd.DataFrame(metric_scores).mean().to_dict())
# BERT scores (batched)
scores[task]["bert"] = (
score(df_task["answer"].to_list(), df_task["predicted_answer"].to_list(), lang="EN")[1].mean().item()
)
return scores
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import pandas as pd
from datasets import Dataset, load_dataset
# Templates based on https://github.com/bigai-nlco/LooGLE/blob/main/config/task2prompt.json
context_prompt = {
"shortdep_qa": "Please answer the question based on the long texts below. \n{input}",
"longdep_qa": "Please answer the question based on the long texts below. \n{input}",
"shortdep_cloze": "Please fill in the clozes based on the given long texts below. Each of the placeholder '<mask-n>' in the question could be an entity of Person, Location or Organiocation. The same masks represent the same entity. Output a json format answer, for example: {{'<mask-0>': 'Bob', '<mask-1>': 'Gorrosion Magazine','<mask-2>': 'Bethel Horizon'}}\n{input}", # noqa
"longdep_summarization": "Please generate a summary of the below paper. \n{input}",
}
question_prompt = {
"shortdep_qa": "\nQuestion: {Q}\n",
"longdep_qa": "\nQuestion: {Q}\n",
"shortdep_cloze": "\n Question: {Q} What are the masked entities?\n",
"longdep_summarization": "{Q}\n",
}
answer_prefix = {
"shortdep_qa": "Answer: ",
"longdep_qa": "Answer: ",
"shortdep_cloze": "Answer:",
"longdep_summarization": "Summarization: ",
}
# Source: https://github.com/bigai-nlco/LooGLE/blob/main/config/task2maxlen.json
max_new_tokens = {"shortdep_qa": 300, "longdep_qa": 500, "longdep_summarization": 500, "shortdep_cloze": 50}
for task in ["shortdep_qa", "longdep_qa", "shortdep_cloze", "longdep_summarization"]:
df = load_dataset("bigainlco/LooGLE", task, split="test", trust_remote_code=True).to_pandas()
if task == "longdep_summarization":
df["question"] = ""
df = df.rename(columns={"output": "answer", "input": "context"})
else:
df["qa_pairs"] = df["qa_pairs"].apply(lambda x: eval(x) if x != "none" else [{"Q": "", "A": "", "S": [""]}])
df = df.explode("qa_pairs")
df = pd.concat([df.drop(["qa_pairs"], axis=1), df["qa_pairs"].apply(pd.Series)], axis=1)
df = df.rename(columns={"A": "answer", "Q": "question", "input": "context"})
if task == "shortdep_cloze":
df["answer"] = df["answer"].apply(json.dumps, ensure_ascii=False)
df["context"] = df["context"].apply(lambda x: context_prompt[task].format(input=x))
df["question"] = df["question"].apply(lambda x: question_prompt[task].format(Q=x))
df["answer_prefix"] = answer_prefix[task]
df = df[["context", "question", "answer_prefix", "answer"]]
df["task"] = task
df["max_new_tokens"] = max_new_tokens[task]
# Push to hub
dataset = Dataset.from_pandas(df)
dataset.push_to_hub("simonjegou/loogle", config_name=task, split="test")
# MATH-500
Adapted from https://huggingface.co/datasets/HuggingFaceH4/MATH-500
This dataset contains a subset of 500 problems from the MATH benchmark, corresponding to the test split that OpenAI used in their *Let's Verify Step by Step* paper. See their GitHub repo for the source file: https://github.com/openai/prm800k/tree/main?tab=readme-ov-file#math-splits
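A minimal sketch of loading the source dataset from the Hub (split and field names as documented on the dataset card):

```python
from datasets import load_dataset

# MATH-500 ships a single "test" split with fields such as "problem" and "answer".
dataset = load_dataset("HuggingFaceH4/MATH-500", split="test")
print(dataset[0]["problem"])
```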
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import pandas as pd
def extract_boxed(pred_answer):
try:
return str(pred_answer.split("boxed{")[1].split("}")[0])
except IndexError:
return None
def score_aime(pred_answer, true_answer):
return extract_boxed(pred_answer) == str(true_answer)
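# Illustrative examples (not part of the original script):
# extract_boxed(r"The result is \boxed{42}.") -> "42"
# score_aime(r"\boxed{42}", "42") -> True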
def calculate_metrics(df: pd.DataFrame) -> dict:
correct = 0
answered = 0
for index, row in df.iterrows():
correct += score_aime(row["predicted_answer"], row["answer"])
answered += "boxed{" in row["predicted_answer"]
return {"correct": correct, "answered": answered, "accuracy": correct / len(df), "total": len(df)}
# Needle in a Haystack
This benchmark evaluates a model's ability to retrieve a specific piece of information, the "needle," hidden within a large body of text, the "haystack." The test challenges a model's long-context understanding and its ability to maintain information accuracy over increasing document lengths.
We follow the vast majority of the literature and use [Paul Graham's essays](https://huggingface.co/datasets/alessiodevoto/paul_graham_essays) as the haystack.
> The default needle is a sentence defined in the dataset itself, but it can be replaced by a custom sentence (e.g. for the passkey retrieval or similar tests). To do that, check [utils.py](./utils.py).
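For illustration only (the actual logic lives in [utils.py](./utils.py)), inserting a needle at a relative depth into the haystack can be sketched as follows; `insert_needle` is a hypothetical helper, not part of this repository:

```python
def insert_needle(haystack: str, needle: str, depth: float) -> str:
    """Insert `needle` into `haystack` at a relative depth between 0.0 (start) and 1.0 (end).

    Hypothetical helper for illustration; the benchmark's own insertion logic is in utils.py.
    """
    position = int(len(haystack) * depth)
    return haystack[:position] + " " + needle + " " + haystack[position:]
```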