"...git@developer.sourcefind.cn:OpenDAS/mmdetection3d.git" did not exist on "1f0aeba1dc3647c0d533afa70aa3a4191d87bca3"
Unverified commit 8142f399, authored by Mo Li, committed by GitHub

[Feature] Upgrade the needle-in-a-haystack experiment to Needlebench (#913)

* add needlebench

* simplify needlebench 32k, 128k, 200k for eval

* update ATC prompt

* fix bug in needlebench summarizer

* add needlebench intro, fix summarizer

* lint summarizer

* fix linting error

* move readme.md

* update readme for needlebench

* update docs of needlebench

* simplify needlebench summarizers
parent 120bf8b3
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.needlebench.parallel import NeedleBenchParallelDataset
from opencompass.datasets.needlebench.parallel import NeedleBenchParallelEvaluator
from opencompass.datasets.needlebench.origin import needlebench_postprocess
from opencompass.datasets.needlebench.origin import needlebench_dataset_postprocess
import math
def logistic(x, L=100, x0=50, k=0.1):
return round(L / (1 + math.exp(-k * (x - x0))), 3)
def generate_linear_space(start, end, num):
if num == 1:
return [start]
elif num < 1:
raise ValueError("num must be at least 1.")
step = (end - start) / (num - 1)
return [start + step * i for i in range(num)]
def generate_depth_percents(intervals, interval_type):
if interval_type == 'linear':
return generate_linear_space(0, 100, intervals)
elif interval_type == 'sigmoid':
linear_space = generate_linear_space(0, 100, intervals)
return [logistic(x) for x in linear_space]
else:
raise ValueError('Unsupported interval type')
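# Illustrative note: with 5 intervals, generate_depth_percents(5, 'linear')
# yields [0.0, 25.0, 50.0, 75.0, 100.0]; the 'sigmoid' option maps the same
# grid through logistic(), clustering depths toward the middle of the document
# (logistic(0) ~= 0.669, logistic(50) = 50.0, logistic(100) ~= 99.331).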
needlebench_reader_cfg = dict(input_columns=['prompt'], output_column='answer')
needlebench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{prompt}'),
dict(role='BOT', prompt='{answer}\n'),
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
needlebench_eval_cfg = dict(
evaluator=dict(type=NeedleBenchParallelEvaluator),
pred_postprocessor=dict(type=needlebench_postprocess),
dataset_postprocessor=dict(type=needlebench_dataset_postprocess),
pred_role='BOT')
context_lengths = list(range(5000, 9000, 1000))
document_depth_percent_intervals = 20
document_depth_percent_interval_type = "linear"
base_path = './data/needlebench'
file_list = ['PaulGrahamEssays.jsonl']
needlebench_datasets_en = []
needle_file_name = 'needles.jsonl'
depths_float = generate_depth_percents(
document_depth_percent_intervals,
document_depth_percent_interval_type)
depths = [int(depth) for depth in depths_float]
for original_context_length in context_lengths:
dataset_dict = {
'abbr': f'Length{original_context_length}'
f'_parallel_en_8k',
'type': NeedleBenchParallelDataset,
'path': base_path,
'needle_file_name': needle_file_name,
'length': original_context_length,
'depths': depths,
'tokenizer_model': 'gpt-4',
'file_list': file_list,
'num_repeats_per_file': 25,
'length_buffer': 1300,
'guide': True,
'language': 'English',
'reader_cfg': needlebench_reader_cfg,
'infer_cfg': needlebench_infer_cfg,
'eval_cfg': needlebench_eval_cfg
}
needlebench_datasets_en.append(dataset_dict)
file_list = ['zh_finance.jsonl']
needlebench_datasets_zh = []
for original_context_length in context_lengths:
dataset_dict = {
'abbr': f'Length{original_context_length}'
f'_parallel_zh_8k',
'type': NeedleBenchParallelDataset,
'path': base_path,
'needle_file_name': needle_file_name,
'length': original_context_length,
'depths': depths,
'tokenizer_model': 'gpt-4',
'file_list': file_list,
'num_repeats_per_file': 25,
'length_buffer': 200,
'guide': True,
'language': 'Chinese',
'reader_cfg': needlebench_reader_cfg,
'infer_cfg': needlebench_infer_cfg,
'eval_cfg': needlebench_eval_cfg
}
needlebench_datasets_zh.append(dataset_dict)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.needlebench.parallel import NeedleBenchParallelDataset
from opencompass.datasets.needlebench.parallel import NeedleBenchParallelEvaluator
from opencompass.datasets.needlebench.origin import needlebench_postprocess
from opencompass.datasets.needlebench.origin import needlebench_dataset_postprocess
import math
def logistic(x, L=100, x0=50, k=0.1):
return round(L / (1 + math.exp(-k * (x - x0))), 3)
def generate_linear_space(start, end, num):
if num == 1:
return [start]
elif num < 1:
raise ValueError("num must be at least 1.")
step = (end - start) / (num - 1)
return [start + step * i for i in range(num)]
def generate_depth_percents(intervals, interval_type):
if interval_type == 'linear':
return generate_linear_space(0, 100, intervals)
elif interval_type == 'sigmoid':
linear_space = generate_linear_space(0, 100, intervals)
return [logistic(x) for x in linear_space]
else:
raise ValueError('Unsupported interval type')
needlebench_reader_cfg = dict(input_columns=['prompt'], output_column='answer')
needlebench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{prompt}'),
dict(role='BOT', prompt='{answer}\n'),
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
needlebench_eval_cfg = dict(
evaluator=dict(type=NeedleBenchParallelEvaluator),
pred_postprocessor=dict(type=needlebench_postprocess),
dataset_postprocessor=dict(type=needlebench_dataset_postprocess),
pred_role='BOT')
context_lengths = list(range(5000, 9000, 1000))
document_depth_percent_intervals_list = [1, 5, 10, 15, 20]
document_depth_percent_interval_type = "linear"
base_path = './data/needlebench'
file_list = ['PaulGrahamEssays.jsonl']
needlebench_datasets_en = []
needle_file_name = 'needles.jsonl'
for document_depth_percent_intervals in document_depth_percent_intervals_list:
depths_float = generate_depth_percents(
document_depth_percent_intervals,
document_depth_percent_interval_type)
depths = [int(depth) for depth in depths_float]
for original_context_length in context_lengths:
dataset_dict = {
'abbr': f'Length{original_context_length}'
f'_parallel_en_8k_batch{document_depth_percent_intervals}',
'type': NeedleBenchParallelDataset,
'path': base_path,
'needle_file_name': needle_file_name,
'length': original_context_length,
'depths': depths,
'tokenizer_model': 'gpt-4',
'file_list': file_list,
'num_repeats_per_file': 50,
'length_buffer': 1300,
'guide': True,
'language': 'English',
'reader_cfg': needlebench_reader_cfg,
'infer_cfg': needlebench_infer_cfg,
'eval_cfg': needlebench_eval_cfg
}
needlebench_datasets_en.append(dataset_dict)
file_list = ['zh_finance.jsonl']
needlebench_datasets_zh = []
needle_file_name = 'needles.jsonl'
for document_depth_percent_intervals in document_depth_percent_intervals_list:
depths_float = generate_depth_percents(
document_depth_percent_intervals,
document_depth_percent_interval_type)
depths = [int(depth) for depth in depths_float]
for original_context_length in context_lengths:
dataset_dict = {
'abbr': f'Length{original_context_length}'
f'_parallel_zh_8k_batch{document_depth_percent_intervals}',
'type': NeedleBenchParallelDataset,
'path': base_path,
'needle_file_name': needle_file_name,
'length': original_context_length,
'depths': depths,
'tokenizer_model': 'gpt-4',
'file_list': file_list,
'num_repeats_per_file': 50,
'length_buffer': 200,
'guide': True,
'language': 'Chinese',
'reader_cfg': needlebench_reader_cfg,
'infer_cfg': needlebench_infer_cfg,
'eval_cfg': needlebench_eval_cfg
}
needlebench_datasets_zh.append(dataset_dict)
 from opencompass.openicl.icl_prompt_template import PromptTemplate
 from opencompass.openicl.icl_retriever import ZeroRetriever
 from opencompass.openicl.icl_inferencer import GenInferencer
-from opencompass.datasets.cdme.cdme_multi import CDMEDataset
-from opencompass.datasets.cdme.cdme_multi import CDMEEvaluator
-from opencompass.datasets.cdme.cdme_multi import cdme_postprocess
-from opencompass.datasets.cdme.cdme_multi import cdme_dataset_postprocess
+from opencompass.datasets.needlebench.origin import NeedleBenchOriginDataset
+from opencompass.datasets.needlebench.origin import NeedleBenchOriginEvaluator
+from opencompass.datasets.needlebench.origin import needlebench_postprocess
+from opencompass.datasets.needlebench.origin import needlebench_dataset_postprocess
 import math
@@ -31,47 +31,72 @@ def generate_depth_percents(intervals, interval_type):
         raise ValueError('Unsupported interval type')

-cdme_reader_cfg = dict(input_columns=['prompt'], output_column='answer')
+needlebench_reader_cfg = dict(input_columns=['prompt'], output_column='answer')

-cdme_infer_cfg = dict(
+needlebench_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
-        template='''{prompt}'''),
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt='{prompt}'),
+                dict(role='BOT', prompt='{answer}\n'),
+            ]
+        )
+    ),
     retriever=dict(type=ZeroRetriever),
-    inferencer=dict(type=GenInferencer, max_out_len=512))
+    inferencer=dict(type=GenInferencer))

-cdme_eval_cfg = dict(
-    evaluator=dict(type=CDMEEvaluator),
-    pred_postprocessor=dict(type=cdme_postprocess),
-    dataset_postprocessor=dict(type=cdme_dataset_postprocess),
+needlebench_eval_cfg = dict(
+    evaluator=dict(type=NeedleBenchOriginEvaluator),
+    pred_postprocessor=dict(type=needlebench_postprocess),
+    dataset_postprocessor=dict(type=needlebench_dataset_postprocess),
     pred_role='BOT')

-context_lengths = list(range(1000, 9000, 1000))
+context_lengths = list(range(5000, 9000, 1000))
 document_depth_percent_intervals = 20
 document_depth_percent_interval_type = "linear"

-base_path = './data/CDME'
-file_list = ['zh_finance.jsonl']
-cdme_datasets = []
+base_path = './data/needlebench'
+file_list = ['PaulGrahamEssays.jsonl']
+needlebench_datasets_en = []
+needle_file_name = 'needles.jsonl'

-needles = ['\n意大利的佛罗伦萨有一家名为"La Giostra"的餐馆,是整个佛罗伦萨中排行第一的餐馆。\n',
-           '"La Giostra"餐馆的特色菜肴是松露奶酪通心粉。',
-           '松露奶酪通心粉是该家餐馆的有着意大利皇室烹饪血统的大厨Jack制作',]
-retrieval_question = ("制作佛罗伦萨中排行第一的餐馆的特色菜肴的人叫什么?"
-                      "请按照'制作佛罗伦萨中排行第一的餐馆的特色菜肴的人叫______。'的格式回答。")
-answer = "制作佛罗伦萨中排行第一的餐馆的特色菜肴的人叫Jack"
-
-keyword = "Jack"
-diff = 25
-
 for original_context_length in context_lengths:
     for depth_percent in generate_depth_percents(
             document_depth_percent_intervals,
             document_depth_percent_interval_type):
         dataset_dict = {
-            'abbr': f'CDME_Length{original_context_length}'
-            f'Depth{int(depth_percent)}',
-            'type': CDMEDataset,
+            'abbr': f'Length{original_context_length}'
+            f'Depth{int(depth_percent)}_origin_en_8k',
+            'type': NeedleBenchOriginDataset,
             'path': base_path,
             'length': original_context_length,
             'depth': int(depth_percent),
+            'tokenizer_model': 'gpt-4',
+            'file_list': file_list,
+            'num_repeats_per_file': 10,
+            'length_buffer': 800,
+            'guide': True,
+            'language': 'English',
+            'needle_file_name': needle_file_name,
+            'reader_cfg': needlebench_reader_cfg,
+            'infer_cfg': needlebench_infer_cfg,
+            'eval_cfg': needlebench_eval_cfg
+        }
+        needlebench_datasets_en.append(dataset_dict)
+
+file_list = ['zh_finance.jsonl']
+needlebench_datasets_zh = []
+needle_file_name = 'needles.jsonl'
+
+for original_context_length in context_lengths:
+    for depth_percent in generate_depth_percents(
+            document_depth_percent_intervals,
+            document_depth_percent_interval_type):
+        dataset_dict = {
+            'abbr': f'Length{original_context_length}'
+            f'Depth{int(depth_percent)}_origin_zh_8k',
+            'type': NeedleBenchOriginDataset,
+            'path': base_path,
+            'length': original_context_length,
+            'depth': int(depth_percent),
@@ -81,13 +106,9 @@ for original_context_length in context_lengths:
             'length_buffer': 200,
             'guide': True,
             'language': 'Chinese',
-            'needles': needles,
-            'diff': diff,
-            'retrieval_question': retrieval_question,
-            'answer': answer,
-            'keyword': keyword,
-            'reader_cfg': cdme_reader_cfg,
-            'infer_cfg': cdme_infer_cfg,
-            'eval_cfg': cdme_eval_cfg
+            'needle_file_name': needle_file_name,
+            'reader_cfg': needlebench_reader_cfg,
+            'infer_cfg': needlebench_infer_cfg,
+            'eval_cfg': needlebench_eval_cfg
         }
-        cdme_datasets.append(dataset_dict)
+        needlebench_datasets_zh.append(dataset_dict)
# Needlebench: A Benchmark for Needle-In-A-Haystack Evaluations
English | [简体中文](readme_zh-CN.md)
## Overview
Needlebench is an exhaustive benchmark designed to rigorously assess the information retrieval and reasoning capabilities of large language models (LLMs). Drawing inspiration from the NeedleInAHaystack experiment, Needlebench broadens the scope to include a variety of tasks, each aimed at testing different facets of LLMs' abilities in long-context scenarios.
### Directory Structure
```
configs/datasets/needlebench/
├── atc
├── needlebench_4k
├── needlebench_8k
├── needlebench_32k
├── needlebench_128k
├── needlebench_200k
├── needlebench.py
├── readme.md
└── readme_zh-CN.md
```
Within each configuration directory (e.g., `needlebench_4k`), there are scripts tailored for testing within that specific length setting:
```
needlebench_4k/
├── needlebench_multi_reasoning.py
├── needlebench_multi_retrieval.py
├── needlebench.py
└── needlebench_single.py
```
## Task Descriptions and Length Configurations
Needlebench offers tasks in various length configurations, such as 4k, 8k, etc., to accommodate different scales of language model evaluation needs. Each length configuration provides specialized test scripts for the following tasks:
### Single-Needle Retrieval (`needlebench_single.py`)
The Single-Needle Retrieval task evaluates the LLMs' ability to recall a single piece of crucial information from a haystack text of a specific length. This task mirrors the original NeedleInAHaystack test's objective, assessing the model's precision in identifying and recalling specific information from extended texts.
### Multi-Needle Retrieval (`needlebench_multi_retrieval.py`)
The Multi-Needle Retrieval task challenges the LLMs' ability to identify and extract multiple key information points from extensive texts. It simulates real-world scenarios where multiple data points, facts, or figures need to be retrieved from documents or reports, evaluating the model's efficiency in navigating and extracting relevant information from dense texts.
### Multi-Needle Reasoning (`needlebench_multi_reasoning.py`)
Building on the retrieval tasks, the Multi-Needle Reasoning task emphasizes the LLMs' capacity for complex reasoning with the retrieved information. The model must not only recall multiple pieces of information but also engage in logical reasoning, synthesizing answers that reflect an understanding of the intricate relationships between various information points.
### Ancestral Trace Challenge (ATC)
The Ancestral Trace Challenge is Needlebench's most complex task, requiring models to recall and analyze every detail in long texts for problem-solving that demands an understanding of complex relationships, such as genealogical inquiries or detailed case analysis. This task highlights the need for models to process and reason with information at a granular level, mirroring the demands of sophisticated real-world analytical tasks.
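The dataset configs above are meant to be pulled into an OpenCompass evaluation config. Below is a minimal sketch, mirroring the example evaluation config included in this change; the import paths assume the config file lives under OpenCompass's `configs/` directory, so adjust them to your setup:
```python
from mmengine.config import read_base

with read_base():
    # pull in the 4k Needlebench datasets and the matching summarizer
    from .datasets.needlebench.needlebench_4k.needlebench import needlebench_datasets
    from .summarizers.needlebench import needlebench_4k_summarizer as summarizer

# collect every imported *_datasets list into one flat list for the runner
datasets = sum([v for k, v in locals().items() if ('datasets' in k)], [])
```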
# Needlebench: A Needle-In-A-Haystack Evaluation Benchmark
[English](readme.md) | Simplified Chinese
## Overview
Needlebench is a comprehensive benchmark designed to rigorously evaluate the information retrieval and reasoning capabilities of large language models (LLMs). Drawing inspiration from the NeedleInAHaystack experiment, Needlebench broadens the scope to a variety of tasks, each designed to probe a different aspect of how LLMs handle key information in long texts.
### Directory Structure
```
configs/datasets/needlebench/
├── atc
├── needlebench_4k
├── needlebench_8k
├── needlebench_32k
├── needlebench_128k
├── needlebench_200k
├── needlebench.py
├── readme.md
└── readme_zh-CN.md
```
Each length-configuration directory (e.g. `needlebench_4k`) contains test scripts tailored to that specific length setting:
```
needlebench_4k/
├── needlebench_multi_reasoning.py
├── needlebench_multi_retrieval.py
├── needlebench.py
└── needlebench_single.py
```
## Task Descriptions and Length Configurations
Needlebench provides tasks in several length configurations, such as 4k and 8k, to suit the evaluation needs of language models of different scales. Each length configuration offers dedicated test scripts for the following tasks:
### Single-Needle Retrieval (`needlebench_single.py`)
The single-needle retrieval task evaluates an LLM's ability to recall a single important piece of information from a text of a given length otherwise filled with irrelevant material. It mirrors the goal of the original NeedleInAHaystack test, measuring how precisely the model can identify and recall a specific piece of information within a long text.
### Multi-Needle Retrieval (`needlebench_multi_retrieval.py`)
The multi-needle retrieval task challenges an LLM's ability to identify and extract multiple key pieces of information from a long text. It simulates real-world scenarios in which several data points, facts, or figures must be retrieved from documents or reports, evaluating how efficiently the model navigates dense text and pulls out the relevant information.
### Multi-Needle Reasoning (`needlebench_multi_reasoning.py`)
Building on the retrieval tasks, the multi-needle reasoning task emphasizes an LLM's ability to reason over the retrieved information. The model must not only recall multiple pieces of information but also combine them through logical reasoning, producing answers that reflect an understanding of the intricate relationships between the different information points.
### Ancestral Trace Challenge (ATC)
The Ancestral Trace Challenge is the most complex task in Needlebench. It requires the model to recall and analyze every detail of a long text in order to solve problems that demand an understanding of complex relationships, such as genealogical queries or detailed case analysis. This task highlights the model's need to process and reason over fine-grained information, reflecting the demands of complex real-world analytical tasks.
from opencompass.models import HuggingFaceCausalLM
from opencompass.models.turbomind import TurboMindModel
from opencompass.runners import SlurmSequentialRunner
from opencompass.partitioners import SizePartitioner, NaivePartitioner
from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask
from mmengine.config import read_base
with read_base():
# eval needlebench_4k
from .datasets.needlebench.needlebench_4k.needlebench import needlebench_datasets
from .summarizers.needlebench import needlebench_4k_summarizer as summarizer
# only eval original "needle in a haystack test" in needlebench_4k
# from .datasets.needlebench.needlebench_4k.needlebench_single import needlebench_datasets_zh, needlebench_datasets_en
# from .summarizers.needlebench import needlebench_4k_summarizer as summarizer
# eval Ancestral Trace Challenge (ATC)
# from .datasets.needlebench.atc.atc import needlebench_atc_datasets_zh, needlebench_atc_datasets_en
# from .summarizers.needlebench import needlebench_atc_summarizer as summarizer
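# Gather every imported list whose variable name contains 'datasets'
# (e.g. needlebench_datasets, needlebench_datasets_en/zh) into a single flat
# list for the runner.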
datasets = sum([v for k, v in locals().items() if ('datasets' in k)], [])
hf_internlm2_chat_7b_model_meta_template = dict(
round=[
dict(role='HUMAN',
begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n',
end='<|im_end|>\n', generate=True),
],
)
hf_internlm2_chat_7b = dict(
type=HuggingFaceCausalLM,
abbr='internlm2-chat-7b-hf',
path="internlm/internlm2-chat-7b",
tokenizer_path='internlm/internlm2-chat-7b',
model_kwargs=dict(
trust_remote_code=True,
device_map='auto',
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
use_fast=False,
trust_remote_code=True,
),
max_out_len=2000,
max_seq_len=32768,
batch_size=8,
meta_template=hf_internlm2_chat_7b_model_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
)
internlm2_chat_7b_200k = dict(
type=TurboMindModel,
abbr='internlm2-chat-7b-200k',
path="internlm/internlm2-chat-7b",
meta_template=hf_internlm2_chat_7b_model_meta_template,
engine_config=dict(session_len=210000,
max_batch_size=8,
rope_scaling_factor=2.0,
model_name="internlm2-chat-7b"),
gen_config=dict(top_k=1, top_p=0.8,
temperature=1.0,
max_new_tokens=2000),
max_out_len=2000,
max_seq_len=210000,
batch_size=8,
concurrency=8,
run_cfg=dict(num_gpus=1, num_procs=1),
)
models = [
# hf_internlm2_chat_7b,
internlm2_chat_7b_200k,
]
work_dir = './outputs/needlebench'
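# To launch the evaluation, this config is typically passed to OpenCompass's
# standard entry point, e.g.:
#   python run.py configs/eval_needlebench.py
# (the path is illustrative; point it at wherever this file is saved).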
from opencompass.models import HuggingFaceCausalLM
from mmengine.config import read_base
with read_base():
from .datasets.cdme.cdme8k import cdme_datasets
datasets = [*cdme_datasets]
_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|User|>:', end='\n'),
dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
],
)
models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm-chat-20b-hf',
path="internlm/internlm-chat-20b",
tokenizer_path='internlm/internlm-chat-20b',
model_kwargs=dict(
trust_remote_code=True,
device_map='auto',
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
use_fast=False,
trust_remote_code=True,
),
max_out_len=100,
max_seq_len=8192,
batch_size=8,
meta_template=_meta_template,
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<eoa>',
)
]
from opencompass.models.turbomind import TurboMindModel
from mmengine.config import read_base
with read_base():
from .datasets.cdme.cdme200k import cdme_datasets
datasets = [*cdme_datasets]
internlm_meta_template = dict(round=[
dict(role='HUMAN', begin='<|User|>:', end='\n'),
dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
],
eos_token_id=103028)
models = [
# config for internlm-chat-20b
dict(
type=TurboMindModel,
abbr='internlm-chat-20b-turbomind',
path='./turbomind',
max_out_len=100,
max_seq_len=201000,
batch_size=8,
concurrency=8,
meta_template=internlm_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
)
]
# flake8: noqa
import json
import random
from datasets import Dataset
from opencompass.datasets.base import BaseDataset
from opencompass.registry import LOAD_DATASET
@LOAD_DATASET.register_module()
class NeedleBenchATCDataset(BaseDataset):
@staticmethod
def load(
path,
num_needles: int,
language: str,
repeats: int,
):
data = {'prompt': [], 'answer': []}
with open(path, 'r', encoding='utf-8') as file:
names_data = json.load(file)
all_names = names_data[language].split(',')
for _ in range(repeats):
names = random.sample(all_names, num_needles)
if language == 'Chinese':
relationship_terms = [
'父亲', '母亲', '爸爸', '妈妈', '爷爷', '奶奶', '姥姥', '姥爷', '外公', '外婆'
]
relationship_templates = [
'{A}是{B}的{relationship}。',
'{B}的{relationship}是{A}。',
'{A}作为{B}的{relationship},对{B}的成长有重要影响。',
'{A}不仅是{B}的{relationship},还是{B}的榜样。',
'{B}是{A}所生的孩子。',
'{A}对{B}来说,不只是一个{relationship},还是一个朋友。',
'{A}在{B}的生命中扮演着{relationship}的角色。',
'{B}把{A}视为其{relationship}。',
]
elif language == 'English':
relationship_terms = [
'father', 'mother', 'dad', 'mom', 'grandfather',
'grandmother', 'maternal grandmother',
'maternal grandfather', 'paternal grandfather',
'paternal grandmother'
]
relationship_templates = [
"{A} is {B}'s {relationship}.",
"{B}'s {relationship} is {A}.",
("{A}, as {B}'s {relationship}, "
"has a significant impact on {B}'s upbringing."),
("{A} is not only {B}'s {relationship} "
"but also {B}'s role model."),
'{B} is the child of {A}.',
('For {B}, {A} is not just a {relationship}, '
'but also a friend.'),
("{A} plays the role of {B}'s {relationship} "
"in {B}'s life."),
'{B} considers {A} as their {relationship}.',
]
def generate_chain_family_story(names, templates,
relationship_terms):
story = ''
for i in range(len(names) - 1):
template = random.choice(templates)
relation_term = random.choice(relationship_terms)
relation = template.format(A=names[i],
B=names[i + 1],
relationship=relation_term)
story += f'{relation}*'
return story
chain_story = generate_chain_family_story(names,
relationship_templates,
relationship_terms)
# Splitting the chain_story into a list of fragments
family_story_fragments = chain_story.split('*')
# Shuffling the list of fragments
random.shuffle(family_story_fragments)
# Joining the shuffled fragments back into a string
shuffled_story = ''.join(family_story_fragments)
last_person = names[-1]
# Generating the prompt based on the language
if language == 'Chinese':
prompt = (f"""
在上面提供的打乱的家族关系文本中,'{last_person}'的能够向上追溯到的最年长的亲人是谁?
例如:
例子1.如果张强的父亲是马克,除此以外提供的文本中没有更多关于亲属关系的信息,那么在提供的文本中张强能够向上追溯到的最年长的亲人就是马克。
例子2.如果李明的姥姥是张红,而张红的父亲是张强,除此以外提供的文本中没有更多关于亲属关系的信息,那么在提供的文本中李明能够向上追溯到的最年长的亲人就是张强。
例子3.如果小明是张红的曾孙女,张红的祖母是王华,王华的父亲是王刚,除此以外提供的文本中没有更多关于亲属关系的信息,那么小明能够向上追溯到的最年长的亲人就是王刚。
""")
elif language == 'English':
prompt = (f"""
Given the scrambled family relationships described above, who is the eldest relative that '{last_person}' can trace back to in the context?
For example:
Example 1: If Zhang Qiang's father is Mark, and no further information about familial relationships is provided in the text, then the oldest relative Zhang Qiang can trace back to in the provided text is Mark.
Example 2: If Li Ming's grandmother is Zhang Hong, and Zhang Hong's father is Zhang Qiang, and no further information about familial relationships is provided in the text, then the oldest relative Li Ming can trace back to in the provided text is Zhang Qiang.
Example 3: If Xiao Ming is Zhang Hong's great-granddaughter, Zhang Hong's grandmother is Wang Hua, and Wang Hua's father is Wang Gang, and no further information about familial relationships is provided in the text, then the oldest relative Xiao Ming can trace back to in the provided text is Wang Gang."""
)
else:
prompt = 'Language not supported.'
raise Exception('Unsupported language specified. '
"Please choose either 'Chinese' or 'English'.")
# Combine story and prompt
shuffled_story_with_prompt = shuffled_story + ' ' + prompt
data['prompt'].append(shuffled_story_with_prompt)
data['answer'].append(names[0] + '*' + names[0])
dataset = Dataset.from_dict({
'prompt': data['prompt'],
'answer': data['answer'],
})
return dataset
@LOAD_DATASET.register_module()
class NeedleBenchATCOrderedDataset(BaseDataset):
@staticmethod
def load(
path,
num_needles: int,
language: str,
repeats: int,
):
data = {'prompt': [], 'answer': []}
with open(path, 'r', encoding='utf-8') as file:
names_data = json.load(file)
all_names = names_data[language].split(',')
for _ in range(repeats):
names = random.sample(all_names, num_needles)
if language == 'Chinese':
relationship_terms = [
'父亲', '母亲', '爸爸', '妈妈', '爷爷', '奶奶', '姥姥', '姥爷', '外公', '外婆'
]
relationship_templates = [
'{A}是{B}的{relationship}。',
'{B}的{relationship}是{A}。',
'{A}作为{B}的{relationship},对{B}的成长有重要影响。',
'{A}不仅是{B}的{relationship},还是{B}的榜样。',
'{B}是{A}所生的孩子。',
'{A}对{B}来说,不只是一个{relationship},还是一个朋友。',
'{A}在{B}的生命中扮演着{relationship}的角色。',
'{B}把{A}视为其{relationship}。',
]
elif language == 'English':
relationship_terms = [
'father', 'mother', 'dad', 'mom', 'grandfather',
'grandmother', 'maternal grandmother',
'maternal grandfather', 'paternal grandfather',
'paternal grandmother'
]
relationship_templates = [
"{A} is {B}'s {relationship}.",
"{B}'s {relationship} is {A}.",
("{A}, as {B}'s {relationship}, "
"has a significant impact on {B}'s upbringing."),
("{A} is not only {B}'s {relationship} "
"but also {B}'s role model."),
'{B} is the child of {A}.',
('For {B}, {A} is not just a {relationship}, '
'but also a friend.'),
("{A} plays the role of {B}'s {relationship} "
"in {B}'s life."),
'{B} considers {A} as their {relationship}.',
]
def generate_chain_family_story(names, templates,
relationship_terms):
story = ''
for i in range(len(names) - 1):
template = random.choice(templates)
relation_term = random.choice(relationship_terms)
relation = template.format(A=names[i],
B=names[i + 1],
relationship=relation_term)
story += f'{relation}*'
return story
chain_story = generate_chain_family_story(names,
relationship_templates,
relationship_terms)
# Splitting the chain_story into a list of fragments
family_story_fragments = chain_story.split('*')
# Joining the shuffled fragments back into a string
shuffled_story = ''.join(family_story_fragments)
last_person = names[-1]
# Generating the prompt based on the language
if language == 'Chinese':
prompt = (f"""
在上面提供的打乱的家族关系文本中,'{last_person}'的能够向上追溯到的最年长的亲人是谁?
例如:
例子1.如果张强的父亲是马克,除此以外提供的文本中没有更多关于亲属关系的信息,那么在提供的文本中张强能够向上追溯到的最年长的亲人就是马克。
例子2.如果李明的姥姥是张红,而张红的父亲是张强,除此以外提供的文本中没有更多关于亲属关系的信息,那么在提供的文本中李明能够向上追溯到的最年长的亲人就是张强。
例子3.如果小明是张红的曾孙女,张红的祖母是王华,王华的父亲是王刚,除此以外提供的文本中没有更多关于亲属关系的信息,那么小明能够向上追溯到的最年长的亲人就是王刚。
""")
elif language == 'English':
prompt = (f"""
Given the scrambled family relationships described above, who is the eldest relative that '{last_person}' can trace back to in the context?
For example:
Example 1: If Zhang Qiang's father is Mark, and no further information about familial relationships is provided in the text, then the oldest relative Zhang Qiang can trace back to in the provided text is Mark.
Example 2: If Li Ming's grandmother is Zhang Hong, and Zhang Hong's father is Zhang Qiang, and no further information about familial relationships is provided in the text, then the oldest relative Li Ming can trace back to in the provided text is Zhang Qiang.
Example 3: If Xiao Ming is Zhang Hong's great-granddaughter, Zhang Hong's grandmother is Wang Hua, and Wang Hua's father is Wang Gang, and no further information about familial relationships is provided in the text, then the oldest relative Xiao Ming can trace back to in the provided text is Wang Gang."""
)
else:
prompt = 'Language not supported.'
raise Exception('Unsupported language specified. '
"Please choose either 'Chinese' or 'English'.")
# Combine story and prompt
shuffled_story_with_prompt = shuffled_story + ' ' + prompt
data['prompt'].append(shuffled_story_with_prompt)
data['answer'].append(names[0] + '*' + names[0])
dataset = Dataset.from_dict({
'prompt': data['prompt'],
'answer': data['answer'],
})
return dataset
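# Illustrative usage sketch (the path and file name below are hypothetical;
# the real names file ships with the NeedleBench data and stores
# comma-separated names under per-language keys, e.g. {'English': 'A,B,...'}):
#
#   dataset = NeedleBenchATCDataset.load(
#       path='./data/needlebench/names.json',
#       num_needles=5,
#       language='English',
#       repeats=3,
#   )
#   print(dataset[0]['prompt'])   # shuffled kinship story + question
#   print(dataset[0]['answer'])   # '<eldest name>*<eldest name>'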
 import json
+import os
 import random
-import re
 from pathlib import Path

 import tiktoken
@@ -8,11 +8,31 @@ from datasets import Dataset
 from opencompass.datasets.base import BaseDataset
 from opencompass.openicl import BaseEvaluator
-from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
+from opencompass.registry import LOAD_DATASET
+
+
+def get_random_needles(file_path, needle_count):
+    with open(file_path, 'r', encoding='utf-8') as file:
+        data = json.load(file)
+
+    matching_records = [
+        record for record in data
+        if record.get('derivation_count') == needle_count
+    ]
+
+    if matching_records:
+        random_record = random.choice(matching_records)
+        return {
+            'needles': random_record['derivations'],
+            'answer': random_record['answer'],
+            'retrieval_question': random_record['question']
+        }
+    else:
+        return None


 @LOAD_DATASET.register_module()
-class CDMEDataset(BaseDataset):
+class NeedleBenchMultiDataset(BaseDataset):

     @staticmethod
     def load(
@@ -25,11 +45,9 @@ class CDMEDataset(BaseDataset):
         length_buffer: int,
         guide: bool,
         language: str,
-        needles: 'list[str]',
+        needle_file_name: str,
+        num_needles: int,
         diff: int,
-        retrieval_question: str,
-        answer: str,
-        keyword: str,
     ):
         data = {'prompt': [], 'answer': []}
         tokenizer = tiktoken.encoding_for_model(tokenizer_model)
@@ -73,16 +91,14 @@ class CDMEDataset(BaseDataset):

         def _modify_retrieval_question(retrieval_question):
             if language == 'Chinese':
-                parts = retrieval_question.split('请按照')
-                guide_retrieval_question = (parts[0] + '在回答之前,请思考文档中与此问题'
-                                            '最相关的内容是什么。请按照' + parts[1])
+                guide_retrieval_question = (retrieval_question +
+                                            '在回答之前,请思考文档中与此问题'
+                                            '最相关的内容是什么。')
                 return guide_retrieval_question
             elif language == 'English':
-                parts = retrieval_question.split('Please answer in the format')
                 guide_retrieval_question = (
-                    parts[0] + 'Before answering, please consider'
-                    ' what in the document is most relevant to this question.'
-                    ' Please answer in the format' + parts[1])
+                    retrieval_question + 'Before answering, please consider'
+                    ' what in the document is most relevant to this question.')
                 return guide_retrieval_question
             else:
                 raise ValueError(f"Language '{language}' is not supported.")
@@ -112,6 +128,7 @@ class CDMEDataset(BaseDataset):
             return prompt

         files = Path(path).glob('*.jsonl')
+        needle_file_path = os.path.join(path, needle_file_name)
         for file in files:
             if file.name not in file_list:
                 continue
@@ -122,12 +139,20 @@ class CDMEDataset(BaseDataset):
             for counter in range(num_repeats_per_file):
                 random.seed(counter)
                 random.shuffle(lines)
+                random_needle_data = get_random_needles(
+                    needle_file_path, num_needles)
+                needles = [
+                    '\n' + needle + '\n'
+                    for needle in random_needle_data['needles']
+                ]
+                answer = random_needle_data['answer']
+                keyword = answer
+                retrieval_question = random_needle_data['retrieval_question']

                 context_length = length - length_buffer
                 target_length_per_record = context_length - \
                     sum(len(tokens) for tokens
                         in _get_tokens_from_context(needles))
+                target_length_per_record = max(target_length_per_record, 0)
                 accumulated_tokens = []
                 for line in lines:
                     tokens_current_line = _get_tokens_from_context(
@@ -154,7 +179,7 @@ class CDMEDataset(BaseDataset):
         return dataset


-class CDMEEvaluator(BaseEvaluator):
+class NeedleBenchMultiEvaluator(BaseEvaluator):

     def levenshtein_distance(self, s1, s2):
         if len(s1) < len(s2):
@@ -175,50 +200,33 @@ class CDMEEvaluator(BaseEvaluator):

         return previous_row[-1]

-    def score(self, predictions, references):
-        if len(predictions) != len(references):
-            return {
-                'error': 'predictions and references have different lengths'
-            }
+    def score(self, predictions, gold):
+        if len(predictions) != len(gold):
+            return {'error': 'predictions and gold have different lengths'}

         total_score = 0
         details = []
-        for prediction, reference in zip(predictions, references):
-            keyword = reference.split('*')[1]
-            reference = reference.split('*')[0]
-            prediction = re.sub(r'\s+', '', prediction)
-            reference = re.sub(r'\s+', '', reference)
-            edit_distance = self.levenshtein_distance(prediction, reference)
-            max_len = max(len(prediction), len(reference))
-            score = 100 * (1 -
-                           edit_distance / max_len) if max_len != 0 else 100
-
-            if keyword in prediction:
-                print(f'{keyword} is in {prediction}')
-                score = 100
-            else:
-                print(f'{keyword} is not in {prediction}')
-                score = 0.2 * score
+
+        for prediction, reference in zip(predictions, gold):
+            answer, keyword = reference.split('*')
+            keywords = keyword.lower().split()
+            prediction = prediction.lower()
+
+            keyword_score = 100 / len(keywords) if keywords else 0
+
+            matched_keywords = sum(1 for kword in keywords
+                                   if kword in prediction)
+            score = matched_keywords * keyword_score

             detail = {
                 'pred': prediction,
                 'answer': reference,
-                'edit_distance': edit_distance,
+                'matched_keywords': matched_keywords,
                 'score': score
             }
             total_score += score
             details.append(detail)

         average_score = total_score / len(predictions) if predictions else 0
-        result = {'score': average_score, 'details': details}
-        return result
-
-
-@TEXT_POSTPROCESSORS.register_module('cdme')
-def cdme_postprocess(text: str) -> str:
-    return text
-
-
-@TEXT_POSTPROCESSORS.register_module('cdme_dataset')
-def cdme_dataset_postprocess(text: str) -> str:
-    return text
+        return {'score': average_score, 'details': details}
 import json
+import os
 import random
 import re
 from pathlib import Path
@@ -11,8 +12,26 @@ from opencompass.openicl import BaseEvaluator
 from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS


+def get_random_line_by_language(file_path, language):
+    with open(file_path, 'r', encoding='utf-8') as file:
+        lines = [
+            json.loads(line.strip()) for line in file
+            if json.loads(line.strip())['language'] == language
+        ]
+
+    if lines:
+        random_line = random.choice(lines)
+        return {
+            'needle': random_line['needle'],
+            'retrieval_question': random_line['retrieval_question'],
+            'keyword': random_line['arg2']
+        }
+    else:
+        return None
+
+
 @LOAD_DATASET.register_module()
-class CDMEDataset(BaseDataset):
+class NeedleBenchOriginDataset(BaseDataset):

     @staticmethod
     def load(
@@ -25,8 +44,7 @@ class CDMEDataset(BaseDataset):
         length_buffer: int,
         guide: bool,
         language: str,
-        needle: str,
-        retrieval_question: str,
+        needle_file_name: str,
     ):
         data = {'prompt': [], 'answer': []}
         tokenizer = tiktoken.encoding_for_model(tokenizer_model)
@@ -96,11 +114,17 @@ class CDMEDataset(BaseDataset):
             for counter in range(num_repeats_per_file):
                 random.seed(counter)
                 random.shuffle(lines)
+                needle_file_path = os.path.join(path, needle_file_name)
+                random_needle = get_random_line_by_language(
+                    needle_file_path, language)
+                needle = '\n' + random_needle['needle'] + '\n'
+                retrieval_question = random_needle['retrieval_question']
+                keyword = random_needle['keyword']

                 context_length = length - length_buffer
                 target_length_per_record = context_length - len(
                     _get_tokens_from_context(needle))
+                target_length_per_record = max(target_length_per_record, 0)
                 accumulated_tokens = []
                 for line in lines:
                     tokens_current_line = _get_tokens_from_context(
@@ -118,7 +142,7 @@ class CDMEDataset(BaseDataset):
                         retrieval_question)

                 data['prompt'].append(processed_prompt)
-                data['answer'].append(needle)
+                data['answer'].append(needle + '*' + keyword)

         dataset = Dataset.from_dict({
             'prompt': data['prompt'],
@@ -127,7 +151,7 @@ class CDMEDataset(BaseDataset):
         return dataset


-class CDMEEvaluator(BaseEvaluator):
+class NeedleBenchOriginEvaluator(BaseEvaluator):

     def __init__(self, use_trim=False):
         self.use_trim = use_trim
@@ -174,20 +198,22 @@ class CDMEEvaluator(BaseEvaluator):

         return previous_row[-1]

-    def score(self, predictions, references):
-        if len(predictions) != len(references):
-            return {
-                'error': 'predictions and references have different lengths'
-            }
+    def score(self, predictions, gold):
+
+        if len(predictions) != len(gold):
+            return {'error': 'predictions and gold have different lengths'}

         total_score = 0
         details = []
-        for prediction, reference in zip(predictions, references):
+        for prediction, reference in zip(predictions, gold):
+            keyword = reference.split('*')[1]
+            reference = reference.split('*')[0]
+            raw_prediction = prediction
             prediction = re.sub(r'\s+', '', prediction)
             reference = re.sub(r'\s+', '', reference)
             if self.use_trim:
-                prediction = CDMEEvaluator._trim_prediction(
+                prediction = NeedleBenchOriginEvaluator._trim_prediction(
                     prediction, reference)

             edit_distance = self.levenshtein_distance(prediction, reference)
@@ -195,6 +221,13 @@ class CDMEEvaluator(BaseEvaluator):
             score = 100 * (1 -
                            edit_distance / max_len) if max_len != 0 else 100

+            if keyword in raw_prediction:
+                print(f'{keyword} is in {prediction}')
+                score = 100
+            else:
+                print(f'{keyword} is not in {prediction}')
+                score = 0.2 * score
+
             detail = {
                 'pred': prediction,
                 'answer': reference,
@@ -209,11 +242,11 @@ class CDMEEvaluator(BaseEvaluator):

         return result


-@TEXT_POSTPROCESSORS.register_module('cdme')
-def cdme_postprocess(text: str) -> str:
+@TEXT_POSTPROCESSORS.register_module('needlebench')
+def needlebench_postprocess(text: str) -> str:
     return text


-@TEXT_POSTPROCESSORS.register_module('cdme_dataset')
-def cdme_dataset_postprocess(text: str) -> str:
+@TEXT_POSTPROCESSORS.register_module('needlebench_dataset')
+def needlebench_dataset_postprocess(text: str) -> str:
     return text
import json
import random
from pathlib import Path
import tiktoken
from datasets import Dataset
from opencompass.datasets.base import BaseDataset
from opencompass.openicl import BaseEvaluator
from opencompass.registry import LOAD_DATASET
def get_unique_entries(file_path,
n,
language,
unique_arg1=False,
unique_arg2=False,
unique_combination=False):
seen_arg1 = set()
seen_arg2 = set()
seen_combinations = set()
results = []
with open(file_path, 'r', encoding='utf-8') as file:
lines = file.readlines()
random.shuffle(lines)
for line in lines:
try:
entry = json.loads(line.strip())
except json.JSONDecodeError:
continue
if entry.get('language') != language:
continue
key1 = entry.get('arg1', '') if unique_arg1 else ''
key2 = entry.get('arg2', '') if unique_arg2 else ''
combination = (key1, key2) if unique_combination else ''
if (key1 not in seen_arg1 or not unique_arg1) and \
(key2 not in seen_arg2 or not unique_arg2) and \
(combination not in seen_combinations or not unique_combination):
seen_arg1.add(key1)
seen_arg2.add(key2)
seen_combinations.add(combination)
results.append(entry)
if len(results) == n:
break
return results
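# Note: get_unique_entries samples up to `n` needle records in the requested
# language, optionally enforcing uniqueness on arg1/arg2 (needle entity /
# answer keyword) or their combination, so that each insertion depth below can
# be paired with a distinct needle.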
@LOAD_DATASET.register_module()
class NeedleBenchParallelDataset(BaseDataset):
@staticmethod
def load(
path: str,
needle_file_name: str,
length: int,
depths: list[int],
tokenizer_model: str,
file_list: list[str],
num_repeats_per_file: int,
length_buffer: int,
guide: bool,
language: str,
):
data = {'prompt': [], 'answer': []}
tokenizer = tiktoken.encoding_for_model(tokenizer_model)
files = Path(path).glob('*.jsonl')
for file in files:
if file.name == needle_file_name:
needle_file_path = file
predefined_needles_bak = get_unique_entries(needle_file_path,
len(depths),
language,
unique_arg1=True,
unique_arg2=True,
unique_combination=True)
def _generate_context(tokens_context, depths, needles):
insertion_points = [
int(len(tokens_context) * (depth / 100)) for depth in depths
]
cumulative_inserted_length = 0
for i, needle in enumerate(needles):
needle_tokens = _get_tokens_from_context(needle)
current_insertion_point = min(
insertion_points[i] + cumulative_inserted_length,
len(tokens_context))
tokens_context = tokens_context[:current_insertion_point] + \
needle_tokens + tokens_context[current_insertion_point:]
cumulative_inserted_length += len(needle_tokens)
new_context = _decode_tokens(tokens_context)
return new_context
def _get_tokens_from_context(context):
if isinstance(context, list):
return [tokenizer.encode(item) for item in context]
else:
return tokenizer.encode(context)
def _decode_tokens(tokens):
return tokenizer.decode(tokens)
def _modify_retrieval_question(retrieval_question):
if language == 'Chinese':
parts = retrieval_question.split('请按照')
guide_retrieval_question = (parts[0] + '在回答之前,请思考文档中与此问题'
'最相关的内容是什么。请按照' + parts[1])
return guide_retrieval_question
elif language == 'English':
parts = retrieval_question.split('Please answer in the format')
guide_retrieval_question = (
parts[0] + 'Before answering, please consider'
' what in the document is most relevant to this question.'
' Please answer in the format' + parts[1])
return guide_retrieval_question
else:
raise ValueError(f"Language '{language}' is not supported.")
def _generate_prompt(context, retrieval_question):
if guide:
retrieval_question = _modify_retrieval_question(
retrieval_question)
if language == 'Chinese':
prompt = ('你是一个善于回答用户问题的智能AI助手\n'
'请保持你的回答简洁清楚。不要说和下面文档中的无关的话'
',或重复你的回答\n请先仔细阅读下面的文档再依次回答'
f'最后提出的问题\n用户现在给你的文档是{context}\n\n'
f'现在请问:{retrieval_question}\n')
elif language == 'English':
prompt = (
'You are an intelligent AI assistant skilled in '
'answering user questions.\n'
'Please keep your answers concise and clear. Do not'
' talk about irrelevant topics or repeat your '
'answers.\n'
f'The document given to you by the user is {context}'
f'\n\nNow, the questions are: {retrieval_question}\n')
else:
raise ValueError(f"Language '{language}' is not supported.")
return prompt
files = Path(path).glob('*.jsonl')
for file in files:
if file.name not in file_list:
continue
with open(file, 'r', encoding='utf-8') as f:
lines_bak = [json.loads(line.strip()) for line in f]
lines = lines_bak.copy()
for counter in range(num_repeats_per_file):
random.seed(counter)
random.shuffle(lines)
predefined_needles = predefined_needles_bak.copy()
random.shuffle(predefined_needles)
needles = [
'\n' + item['needle'] + '\n' for item in predefined_needles
]
keywords = [item['arg2'] for item in predefined_needles]
if language == 'Chinese':
questions = '、'.join([
item['retrieval_question'].split('?')[0] + '?'
for item in predefined_needles
])
answers_format = '、'.join([
item['retrieval_question'].split("'")[1].split('。')[0]
for item in predefined_needles
])
retrieval_question = questions + "请按照'" + \
answers_format + "'的格式回答。"
elif language == 'English':
questions = '、'.join([
item['retrieval_question'].split('?')[0] + '?'
for item in predefined_needles
])
answers_format = '、'.join([
item['retrieval_question'].split("'")[1].split('.')[0]
for item in predefined_needles
])
retrieval_question = questions + \
"Please answer in the format of '" + \
answers_format + "'"
context_length = length - length_buffer
target_length_per_record = context_length - \
sum(len(tokens) for tokens
in _get_tokens_from_context(needles))
target_length_per_record = max(target_length_per_record, 0)
accumulated_tokens = []
for line in lines:
tokens_current_line = _get_tokens_from_context(
line['text'])
accumulated_tokens.extend(tokens_current_line)
if len(accumulated_tokens) >= target_length_per_record:
break
processed_text = _generate_context(
accumulated_tokens[:target_length_per_record], depths,
needles)
processed_prompt = _generate_prompt(processed_text,
retrieval_question)
data['prompt'].append(processed_prompt)
data['answer'].append('*'.join(keywords) + '#' +
'*'.join(map(str, depths)))
dataset = Dataset.from_dict({
'prompt': data['prompt'],
'answer': data['answer'],
})
return dataset
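# Note: each gold answer produced above is encoded as 'kw1*kw2*...#d1*d2*...',
# i.e. the needle keywords joined by '*', then '#', then the insertion depths
# joined by '*'. NeedleBenchParallelEvaluator.score splits on '#' and '*' to
# credit keyword recall per depth.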
class NeedleBenchParallelEvaluator(BaseEvaluator):
def levenshtein_distance(self, s1, s2):
if len(s1) < len(s2):
return self.levenshtein_distance(s2, s1)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def score(self, predictions, gold):
if len(predictions) != len(gold):
return {'error': 'predictions and gold have different lengths'}
print('predictions:', predictions)
print('gold:', gold)
details = []
depths = [int(i) for i in gold[0].split('#')[1].split('*')]
scores_by_depth = {depth: 0 for depth in depths}
for prediction, reference in zip(predictions, gold):
print(reference)
keywords = reference.split('#')[0].split('*')
print(keywords)
for keyword, depth in zip(keywords, depths):
print('iterating:', keyword, depth)
if keyword in prediction:
print(f'{keyword} at depth {depth} is in {prediction}')
scores_by_depth[depth] += 100 / (len(predictions))
average_score = sum(scores_by_depth.values()) / len(scores_by_depth)
flattened_scores = {
'Depth' + str(depth): score
for depth, score in scores_by_depth.items()
}
result = {
**flattened_scores, 'details': details,
'average_score': average_score
}
return result
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
class CDMEDataset():
@staticmethod
def visualize(path: str, dataset_length: str):
for file_path in path:
df = pd.read_csv(file_path)
df['Context Length'] = df['dataset'].apply(
lambda x: int(x.split('Length')[1].split('Depth')[0]))
df['Document Depth'] = df['dataset'].apply(
lambda x: float(x.split('Depth')[1].split('_')[0]))
# Exclude 'Context Length' and 'Document Depth' columns
model_columns = [
col for col in df.columns
if col not in ['Context Length', 'Document Depth']
]
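# The first four columns of the summarizer CSV are assumed to be metadata
# (dataset/version/metric/mode); slicing [4:] below keeps only the per-model
# score columns. Adjust if your CSV layout differs.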
for model_name in model_columns[4:]:
model_df = df[['Document Depth', 'Context Length',
model_name]].copy()
model_df.rename(columns={model_name: 'Score'}, inplace=True)
# Create pivot table
pivot_table = pd.pivot_table(model_df,
values='Score',
index=['Document Depth'],
columns=['Context Length'],
aggfunc='mean')
# Calculate mean scores
mean_scores = pivot_table.mean().values
# Calculate overall score
overall_score = mean_scores.mean()
# Create heatmap and line plot
plt.figure(figsize=(15.5, 8))
ax = plt.gca()
cmap = LinearSegmentedColormap.from_list(
'custom_cmap', ['#F0496E', '#EBB839', '#0CD79F'])
# Draw heatmap
sns.heatmap(pivot_table,
cmap=cmap,
ax=ax,
cbar_kws={'label': 'Score'},
vmin=0,
vmax=100)
# Set line plot data
x_data = [i + 0.5 for i in range(len(mean_scores))]
y_data = mean_scores
# Create twin axis for line plot
ax2 = ax.twinx()
# Draw line plot
ax2.plot(x_data,
y_data,
color='white',
marker='o',
linestyle='-',
linewidth=2,
markersize=8,
label='Average Depth Score')
# Set y-axis range
ax2.set_ylim(0, 100)
# Hide original y-axis ticks and labels
ax2.set_yticklabels([])
ax2.set_yticks([])
# Add legend
ax2.legend(loc='upper left')
# Set chart title and labels
ax.set_title(f'{model_name} {dataset_length} Context '
'Performance\nFact Retrieval Across '
'Context Lengths ("Needle In A Haystack")')
ax.set_xlabel('Token Limit')
ax.set_ylabel('Depth Percent')
ax.set_xticklabels(pivot_table.columns.values, rotation=45)
ax.set_yticklabels(pivot_table.index.values, rotation=0)
# Add overall score as a subtitle
plt.text(0.5,
-0.13, f'Overall Score for {model_name}: '
f'{overall_score:.2f}',
ha='center',
va='center',
transform=ax.transAxes,
fontsize=13)
# Save heatmap as PNG
png_file_path = file_path.replace('.csv', f'_{model_name}.png')
plt.tight_layout()
plt.subplots_adjust(right=1)
plt.draw()
plt.savefig(png_file_path)
plt.show()
plt.close() # Close figure to prevent memory leaks
# Print saved PNG file path
print(f'Heatmap for {model_name} saved as: {png_file_path}')
def main():
parser = argparse.ArgumentParser(description='Generate NeedleInAHaystack '
                                 'Test Plots')
parser.add_argument('--path',
nargs='*',
default=['path/to/your/result.csv'],
help='Paths to CSV files for visualization')
parser.add_argument('--dataset_length',
default='8K',
type=str,
help='Dataset_length for visualization')
args = parser.parse_args()
if not args.path:
print("Error: '--path' is required for visualization.")
exit(1)
CDMEDataset.visualize(args.path, args.dataset_length)
if __name__ == '__main__':
main()