import jsonlines
import requests
import json
import argparse
import multiprocessing as mp
import traceback
import tqdm
import time
import tempfile
from datasketch import MinHash, MinHashLSH
import subprocess
import collections
import numpy as np
import hashlib
from func_timeout import func_set_timeout
import pandas as pd
import os
import sys
import xlsxwriter
import itertools
import copy
import re
import math
import nltk  # required when if_tokenize=True in the n-gram helpers below
from sacrebleu import metrics
import ahocorasick
import datasets
from pathlib import Path
from utils import utils

DATA_DIR = "./test_data/"


def has_n_gram_overlap(string1, string2, n_gram=10, if_tokenize=False):
    """Return True if the two strings share at least one n-gram of whitespace-split tokens."""
    if if_tokenize:
        string1 = nltk.tokenize.word_tokenize(string1)
        string2 = nltk.tokenize.word_tokenize(string2)
        string1 = " ".join(string1)
        string2 = " ".join(string2)
    tokens1 = string1.split()
    tokens2 = string2.split()
    grams1 = set([" ".join(tokens1[i:i + n_gram]) for i in range(len(tokens1) - (n_gram - 1))])
    grams2 = set([" ".join(tokens2[i:i + n_gram]) for i in range(len(tokens2) - (n_gram - 1))])
    overlap = grams1.intersection(grams2)
    return len(overlap) > 0

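# A minimal usage sketch for has_n_gram_overlap: the two strings below share the
# 10-gram "a b c d e f g h i j", so the check returns True. The literals are
# illustrative only and not taken from any test set.
#
#     has_n_gram_overlap("a b c d e f g h i j k", "x a b c d e f g h i j")  # -> True
#     has_n_gram_overlap("a b c", "a b c")  # -> False (both strings shorter than n_gram)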

def has_n_gram_overlap_with_testset(string1, testset, n_gram=10, if_tokenize=False, overlaps=None, verbose=False):
    """Return True if string1 shares at least one n-gram with the pre-built test-set n-gram set.

    Matched n-grams are appended to `overlaps` when a list is passed in.
    """
    if overlaps is None:
        # Use a fresh list instead of a mutable default, so matches do not accumulate across calls.
        overlaps = []
    if if_tokenize:
        string1 = nltk.tokenize.word_tokenize(string1)
        string1 = " ".join(string1)
    tokens1 = string1.split()
    grams1 = set([" ".join(tokens1[i:i + n_gram]) for i in range(len(tokens1) - (n_gram - 1))])
    overlap = grams1.intersection(testset)
    overlaps.extend(list(overlap))
    if len(overlap) > 0 and verbose:
        print(overlap)
    return len(overlap) > 0


def get_n_gram(string, n_gram=10, if_tokenize=False):
    """Return the set of whitespace-token n-grams of `string`."""
    if if_tokenize:
        string = " ".join(nltk.tokenize.word_tokenize(string))
    tokens1 = string.split()
    return set([" ".join(tokens1[i:i + n_gram]) for i in range(len(tokens1) - (n_gram - 1))])


def load_leetcode_test_data():
    data = utils.read_jsonl_file("livecodebench.jsonl")
    samples = []
    for obj in data:
        samples.append({"prompt": obj["prompt"]})
    return samples


def load_humanevalpack_test_data(data_path=f"{DATA_DIR}/humanevalpack"):
    ds1 = datasets.load_dataset(data_path, "python", trust_remote_code=True)["test"]
    ds2 = datasets.load_dataset(data_path, "js", trust_remote_code=True)["test"]
    ds3 = datasets.load_dataset(data_path, "java", trust_remote_code=True)["test"]
    ds4 = datasets.load_dataset(data_path, "go", trust_remote_code=True)["test"]
    ds5 = datasets.load_dataset(data_path, "cpp", trust_remote_code=True)["test"]
    ds6 = datasets.load_dataset(data_path, "rust", trust_remote_code=True)["test"]
    combined_dataset = datasets.concatenate_datasets([ds1, ds2, ds3, ds4, ds5, ds6])
    data = []
    for j, sample in enumerate(combined_dataset):
        data.append(sample)
    return data


def load_multiply_e():
    samples = []
    for lg in ["sh", "ts", "cs", "php", "java", "cpp", "js", "go", "rs"]:
        objs = utils.read_jsonl_file(f"{DATA_DIR}/multiple/data/humaneval-{lg}.jsonl")
        for j, sample in enumerate(objs):
            samples.append({"prompt": sample["prompt"]})
    return samples


def load_mbpp_test_data(data_path=f"{DATA_DIR}/mbpp/"):
    ds = utils.read_jsonl_file(f"{data_path}/mbpp.jsonl")
    data = []
    for j, sample in enumerate(ds):
        data.append(sample)
    return data


def load_ds1000_test_data(data_path="data/ds1000_data/"):

    def extract_ds_1000_prompt(prompt: str):
        if "SOLUTION START" in prompt:
            assert prompt.count("SOLUTION START") == 1
            return prompt.split("SOLUTION START")[0]
        elif "BEGIN SOLUTION" in prompt:
            assert prompt.count("BEGIN SOLUTION") == 1
            return prompt.split("BEGIN SOLUTION")[0]
        else:
            return prompt

    def load_ds_1000(data_path):
        data = []
        for prompt_file in Path(data_path).glob("*/Insertion/q*/prompt.txt"):
            with open(prompt_file) as f:
                # Wrap the cleaned prompt so get_testset_n_gram can read obj["insertion"].
                data.append({"insertion": extract_ds_1000_prompt(f.read())})
        return data

    return load_ds_1000(data_path)


def load_codeapex_data():
    ds = utils.read_jsonl_file("data/eval/eval_codeapex_v1.jsonl")
    data = []
    for j, sample in enumerate(ds):
        data.append(sample)
    return data


def get_testset_n_gram(n_gram=10, test_set=["mbpp", "multiple", "humanevalpack"]):
    print("Start loading decontamination test sets")
    mbpp_data = load_mbpp_test_data()
    humaneval_data = load_humanevalpack_test_data()
    multiply_e_data = load_multiply_e()
    ds1000_data = load_ds1000_test_data()
    codeapex_data = load_codeapex_data()
    #leetcode_data = load_leetcode_test_data()
    print("Successfully loaded decontamination test sets")
    all_grams = set([])
    if "mbpp" in test_set:
        for obj in mbpp_data:
            n_grams = get_n_gram(obj["text"] + "\n" + obj["code"] + "\n".join(obj["test_list"]), n_gram=n_gram)
            all_grams.update(n_grams)
    if "humanevalpack" in test_set:
        for obj in humaneval_data:
            n_grams = get_n_gram(obj["instruction"] + obj["prompt"] + obj["canonical_solution"] + obj["test"], n_gram=n_gram)
            all_grams.update(n_grams)
    if "multiple" in test_set:
        for obj in multiply_e_data:
            n_grams = get_n_gram(obj["prompt"], n_gram=n_gram)
            all_grams.update(n_grams)
    if "ds1000" in test_set:
        for obj in ds1000_data:
            n_grams = get_n_gram(obj["insertion"], n_gram=n_gram)
            all_grams.update(n_grams)
    if "codeapex" in test_set:
        for obj in codeapex_data:
            n_grams = get_n_gram(obj["prompt"], n_gram=n_gram)
            all_grams.update(n_grams)
    # for obj in leetcode_data:
    #     n_grams = get_n_gram(obj["prompt"], n_gram = n_gram)
    #     all_grams.update(n_grams)
    return all_grams


def decontaminate_for_cpt(text, testset_n_gram, testset_func_names, n_gram=10, if_tokenize=False, verbose=False):
    """Return True if `text` is contaminated: it shares an n-gram with the test sets
    or defines a function whose name appears in the test sets.
    """
    if has_n_gram_overlap_with_testset(text, testset_n_gram, n_gram=n_gram, if_tokenize=if_tokenize, verbose=verbose):
        return True
    if contain_func_name(text, testset_func_names):
        return True
    return False

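# Sketch of the intended call pattern for decontaminate_for_cpt. The corpus path
# below is hypothetical; the two test-set structures are built by the helpers
# defined further down in this file:
#
#     testset_n_gram = get_testset_n_gram(n_gram=10)
#     testset_func_names = get_testset_func_name()
#     clean = [obj for obj in utils.read_jsonl_file("cpt_corpus.jsonl")  # hypothetical path
#              if not decontaminate_for_cpt(obj["text"], testset_n_gram, testset_func_names)]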

def contain_func_name(text, testset_func_names):
    return f'{extract_func_name(text)}' in testset_func_names


def extract_func_name(text):
    match = re.search(r"def (.*?)\(().*?\)", text)
    if match is not None:
        return match.group(1).strip()
    match = re.search(r"public \w+ \w+ (.*?)\(().*?\)", text)
    if match is not None:
        return match.group(1).strip()
    return None

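# Illustrative examples for extract_func_name (strings are made up, not from the test sets):
#
#     extract_func_name("def two_sum(nums, target):")                       # -> "two_sum"
#     extract_func_name("public static int twoSum(int[] nums, int target)")  # -> "twoSum"
#     extract_func_name("no function here")                                  # -> None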

def extract_class_name(text):
    match = re.search(r"class\s+(\w+)", text)
    if match is not None:
        return match.group(1).strip()
    return None


def get_testset_func_name(datasets=["humaneval", "mbpp"]):
    test_func_names = set()
    if "humaneval" in datasets:
        humaneval_data = load_humanevalpack_test_data()
        test_func_names.update(set([obj["entry_point"] for obj in humaneval_data]))
    if "mbpp" in datasets:
        mbpp_data = load_mbpp_test_data()
        mbpp_func_names = [extract_func_name(obj["code"]) for obj in mbpp_data]
        # Skip samples where no function name could be extracted.
        test_func_names.update(name for name in mbpp_func_names if name is not None)
    return test_func_names


def deduplicate_similar_strings(objs, num_perm=512, jaccard_threshold=0.8):
    """Drop near-duplicate objects, comparing obj["text"] with MinHash/LSH.

    Example:
        objs = [{"text": "hello world"}, {"text": "hello w0rld"}, {"text": "something else"}]
        deduplicated = deduplicate_similar_strings(objs, jaccard_threshold=0.8)
    """
    # Create an LSH index with a given Jaccard similarity threshold
    lsh = MinHashLSH(threshold=jaccard_threshold, num_perm=num_perm)
    # Create MinHash objects for each string and add to the LSH index
    signatures = {}
    for i, obj in tqdm.tqdm(enumerate(objs)):
        minhash = MinHash(num_perm=num_perm)
        for word in obj["text"].split():
            minhash.update(word.encode('utf8'))
        lsh.insert(f'string_{i}', minhash)
        signatures[f'string_{i}'] = minhash
    unique_strings = []
    processed = set()
    for i, obj in enumerate(objs):
        key = f'string_{i}'
        if key in processed:
            continue
        similar_keys = lsh.query(signatures[key])
        for sim_key in similar_keys:
            processed.add(sim_key)
        unique_strings.append(obj)
    print(f"{len(objs)} -> {len(unique_strings)}")
    return unique_strings


def deduplicate_similar_strings_chatml(objs, num_perm=512, jaccard_threshold=0.6):
    """Near-duplicate removal for chatml-style objects, hashing the user prompt and the
    assistant reply together (assumes messages = [system, user, assistant, ...]).
    """
    # Create an LSH index with a given Jaccard similarity threshold
    lsh = MinHashLSH(threshold=jaccard_threshold, num_perm=num_perm)
    # Create MinHash objects for each string and add to the LSH index
    signatures = {}
    for i, obj in tqdm.tqdm(enumerate(objs)):
        minhash = MinHash(num_perm=num_perm)
        for word in (obj["messages"][1]["content"] + "\n" + obj["messages"][2]["content"]).split():
            minhash.update(word.encode('utf8'))
        lsh.insert(f'string_{i}', minhash)
        signatures[f'string_{i}'] = minhash
    unique_strings = []
    processed = set()
    for i, obj in enumerate(objs):
        key = f'string_{i}'
        if key in processed:
            continue
        similar_keys = lsh.query(signatures[key])
        for sim_key in similar_keys:
            processed.add(sim_key)
        unique_strings.append(obj)
    return unique_strings

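# MPLogExceptions (used by multi_tasks below) is neither defined nor imported in this
# file; it presumably lives in the shared utils package. The class here is a minimal
# sketch under that assumption: its only job is to surface worker tracebacks that
# mp.Pool.apply_async would otherwise swallow silently.
class MPLogExceptions:
    def __init__(self, callable_):
        self.__callable = callable_

    def __call__(self, *args, **kwargs):
        try:
            return self.__callable(*args, **kwargs)
        except Exception:
            # Print the full traceback from the worker process, then re-raise so
            # result.get() in the parent process still reports the failure.
            traceback.print_exc()
            raise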

def multi_tasks(objs, workers=64, path="data/system_role/log_gpt.jsonl", task=None, prompt_template=None, chunk_size=None, language=None):
    """Split `objs` into chunks and run `task` over each chunk in a multiprocessing pool, collecting the results."""
    p = mp.Pool(workers)
    if chunk_size:
        results = []
        job_num = math.ceil(len(objs) / chunk_size)
        print(f"job num: {job_num}")
        for worker_id in range(job_num):
            results.append(p.apply_async(MPLogExceptions(task), args=(objs[worker_id * chunk_size:(worker_id + 1) * chunk_size], worker_id, workers, None, path, prompt_template, language)))
    else:
        chunk_size = math.ceil(len(objs) / float(workers))
        results = []
        for worker_id in range(workers):
            results.append(p.apply_async(MPLogExceptions(task), args=(objs[worker_id * chunk_size:(worker_id + 1) * chunk_size], worker_id, workers, None, path, prompt_template, language)))
    p.close()
    p.join()
    output_objs = []
    for result in results:
        output_objs.extend(result.get())
    return output_objs


if __name__ == "__main__":
    test_n_grams = get_testset_n_gram()
    objs = utils.read_jsonl_file("./sft.jsonl")
    cnt = 0
    for obj in tqdm.tqdm(objs):
        overlaps = []
        # Concatenate every non-system message into one string for the overlap check.
        dialog = "\n".join([obj["messages"][i]["content"] for i in range(1, len(obj["messages"]))])
        if has_n_gram_overlap_with_testset(dialog, test_n_grams, n_gram=10, if_tokenize=False, overlaps=overlaps):
            print(dialog)
            cnt += 1
    print(cnt)