humaneval_utils.py 12.3 KB
Newer Older
Casper's avatar
Casper committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Adapted from https://github.com/abacaj/code-eval

import io
import os
import json
import gzip
import torch
import signal
import tempfile
import itertools
import contextlib
import numpy as np
from tqdm import tqdm
import multiprocessing
from datasets import load_dataset
from collections import defaultdict, Counter
from concurrent.futures import ThreadPoolExecutor, as_completed

from typing import (
    List,
    Union,
    Iterable,
    Dict,
    Optional,
)

from transformers import (
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
)

Casper's avatar
Casper committed
33

Casper's avatar
Casper committed
34
35
36
37
38
39
def eval_humaneval(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    out_path: str = "humaneval_out.jsonl",
    format_tabs: bool = True,
):
    """Run the HumanEval benchmark end to end.

    Generates one completion per problem, writes all samples to ``out_path``
    as UTF-8 JSONL, then executes the official test suites and reports pass@1.

    :param model: causal LM used for generation.
    :param tokenizer: tokenizer matching ``model``.
    :param out_path: destination JSONL file for the generated samples.
    :param format_tabs: if True, convert 4-space indents in the prompt to
        tabs before generation.
    :return: the pass@k dictionary from ``evaluate_functional_correctness``.
    """
    problems = {
        example["task_id"]: example
        for example in load_dataset("openai_humaneval")["test"]
    }

    samples = []

    # NOTE: the original loop wrapped items() in enumerate() but never used
    # the index.
    for task_id, task in tqdm(problems.items(), total=len(problems)):
        if format_tabs:
            prompt = task["prompt"].replace("    ", "\t")
        else:
            prompt = task["prompt"]

        batch_completions = generate_batch_completion(model, tokenizer, prompt, 1)

        for sample in batch_completions:
            samples.append(dict(task_id=task_id, completion=sample))

    # Write one JSON object per line, UTF-8 encoded.
    with open(out_path, "wb") as fp:
        for x in samples:
            fp.write((json.dumps(x) + "\n").encode("utf-8"))

    results = evaluate_functional_correctness(
        sample_file=out_path,
        k=[1],
        n_workers=4,
        timeout=3.0,
    )

    print(results)
    # Fix: previously the metrics were only printed; return them so callers
    # can consume the scores programmatically (printing behavior unchanged).
    return results

Casper's avatar
Casper committed
76

Casper's avatar
Casper committed
77
78
79
80
81
82
83
84
85
86
87
@torch.inference_mode()
def generate_batch_completion(
    model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prompt, batch_size
) -> list[str]:
    """Sample ``batch_size`` completions for a single HumanEval prompt.

    :param model: causal LM (AWQ wrapper; the HF module lives at ``model.model``).
    :param tokenizer: tokenizer matching ``model``.
    :param prompt: the HumanEval function signature + docstring to complete.
    :param batch_size: number of completions to sample in one batch.
    :return: decoded completions with the prompt stripped, truncated to the
        first function body.
    """
    input_batch = [prompt for _ in range(batch_size)]
    inputs = tokenizer(input_batch, return_tensors="pt").to(model.model.device)
    # Everything before this index is the prompt; slice it off after decode.
    input_ids_cutoff = inputs.input_ids.size(dim=1)

    generated_ids = model.generate(
        **inputs,
        use_cache=True,
        # Fix: `generate()` takes `max_new_tokens`; `max_seq_len` is a
        # model-loading option, not a generation kwarg (upstream code-eval
        # uses max_new_tokens=512 here).
        max_new_tokens=512,
        temperature=0.2,
        top_p=0.95,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        # No dedicated pad token; reuse EOS for padding.
        pad_token_id=tokenizer.eos_token_id,
    )

    batch_completions = tokenizer.batch_decode(
        [ids[input_ids_cutoff:] for ids in generated_ids],
        skip_special_tokens=True,
    )

    def filter_code(completion: str) -> str:
        # The program tends to overwrite, we only take the first function
        completion = completion.lstrip("\n")
        return completion.split("\n\n")[0]

    def fix_indents(text: str) -> str:
        return text.replace("\t", "    ")

    return [filter_code(fix_indents(completion)) for completion in batch_completions]


Casper's avatar
Casper committed
112
113
114
def check_correctness(
    problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None
) -> Dict:
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.

    Execution happens in a separate process inside a temporary directory with
    destructive builtins disabled; the outcome string is reported back through
    a manager-backed list.

    :param completion_id: an optional completion ID so we can match
        the results later even if execution finishes asynchronously.
    """

    def unsafe_execute():
        with create_tempdir():
            # These system calls are needed when cleaning up tempdir;
            # stash them before reliability_guard() nulls them out.
            import os
            import shutil

            saved_rmtree = shutil.rmtree
            saved_rmdir = os.rmdir
            saved_chdir = os.chdir

            # Disable functionalities that can make destructive changes to the test.
            reliability_guard()

            # Assemble prompt + completion + test suite into one program.
            check_program = "\n".join(
                [
                    problem["prompt"] + completion,
                    problem["test"],
                    f"check({problem['entry_point']})",
                ]
            )

            try:
                exec_globals = {}
                with swallow_io(), time_limit(timeout):
                    exec(check_program, exec_globals)
                result.append("passed")
            except TimeoutException:
                result.append("timed out")
            except BaseException as e:
                result.append(f"failed: {e}")

            # Restore originals so the tempdir context can clean up.
            shutil.rmtree = saved_rmtree
            os.rmdir = saved_rmdir
            os.chdir = saved_chdir

    manager = multiprocessing.Manager()
    result = manager.list()

    proc = multiprocessing.Process(target=unsafe_execute)
    proc.start()
    # Allow a grace second beyond the in-process timeout before killing.
    proc.join(timeout=timeout + 1)
    if proc.is_alive():
        proc.kill()

    # Child died (or was killed) before reporting anything.
    if not result:
        result.append("timed out")

    outcome = result[0]
    return dict(
        task_id=problem["task_id"],
        passed=outcome == "passed",
        result=outcome,
        completion_id=completion_id,
    )


@contextlib.contextmanager
def time_limit(seconds: float):
    """Context manager raising TimeoutException if the body runs too long.

    Uses a SIGALRM interval timer, so it only works on the main thread of a
    Unix process.
    """

    def _on_alarm(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, _on_alarm)
    try:
        yield
    finally:
        # Always cancel the timer, whether the body finished or raised.
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and block stdin for the duration of the body."""
    sink = WriteOnlyStringIO()
    with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(sink):
        with redirect_stdin(sink):
            yield


@contextlib.contextmanager
def create_tempdir():
    """Yield a fresh temporary directory, cd'ing into it for the duration."""
    with tempfile.TemporaryDirectory() as dirname, chdir(dirname):
        yield dirname


class TimeoutException(Exception):
    """Raised when sandboxed code exceeds its execution deadline."""


class WriteOnlyStringIO(io.StringIO):
    """In-memory text sink whose entire read-side API is disabled.

    Writes behave like a normal StringIO; every read accessor raises IOError
    so captured output can never be read back by the code under test.
    """

    def readable(self, *args, **kwargs):
        # Advertise non-readability to the io machinery.
        return False

    def read(self, *args, **kwargs):
        raise IOError

    def readline(self, *args, **kwargs):
        raise IOError

    def readlines(self, *args, **kwargs):
        raise IOError


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    """Swap ``sys.stdin`` for a given stream, mirroring
    ``contextlib.redirect_stdout``/``redirect_stderr`` via the private
    ``_RedirectStream`` base."""

    _stream = "stdin"
Casper's avatar
Casper committed
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249


@contextlib.contextmanager
def chdir(root):
    """Context manager: cd into ``root``, restoring the previous cwd on exit.

    ``root == "."`` is a no-op fast path (nothing to change or restore).
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    finally:
        # Fix: removed the redundant `except BaseException: raise` clause —
        # `finally` alone already guarantees the cwd is restored on any exit.
        os.chdir(cwd)

Casper's avatar
Casper committed
250

Casper's avatar
Casper committed
251
252
253
254
255
256
def stream_jsonl(filename: str) -> Iterable[Dict]:
    """
    Parses each jsonl line and yields it as a dictionary.

    Transparently handles gzip-compressed files (``.gz`` suffix); blank
    (whitespace-only) lines are skipped.
    """
    # gzip.open accepts a path directly in text mode, so no nested binary
    # handle is needed; plain files open in text mode the same way.
    opener = gzip.open if filename.endswith(".gz") else open
    with opener(filename, "rt") as fp:
        for line in fp:
            # Idiomatic blank-line check (was: any(not x.isspace() ...)).
            if line.strip():
                yield json.loads(line)

Casper's avatar
Casper committed
267

Casper's avatar
Casper committed
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
def estimate_pass_at_k(
    num_samples: Union[int, List[int], np.ndarray],
    num_correct: Union[List[int], np.ndarray],
    k: int,
) -> np.ndarray:
    """
    Estimates pass@k of each problem and returns them in an array.

    Implements 1 - C(n-c, k) / C(n, k) as a telescoping product to avoid
    computing large binomial coefficients directly.
    """

    def _single(n: int, c: int, k: int) -> float:
        # With fewer than k failures available, at least one of the k draws
        # must be a success.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # One shared sample count across all problems.
        sample_counts = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        sample_counts = iter(num_samples)

    return np.array(
        [_single(int(n), int(c), k) for n, c in zip(sample_counts, num_correct)]
    )


def evaluate_functional_correctness(
    sample_file: str,
    k: List[int] = [1, 10, 100],
    n_workers: int = 4,
    timeout: float = 3.0,
):
    """
    Evaluates the functional correctness of generated samples against the
    HumanEval test suites and returns the pass@k metrics.

    :param sample_file: JSONL file of {"task_id", "completion"} records.
    :param k: k values to compute pass@k for; a value is skipped when some
        problem has fewer than k samples. (Mutable default is only iterated,
        never mutated.)
    :param n_workers: thread pool size for running test suites.
    :param timeout: per-sample execution timeout in seconds.
    :return: dict mapping "pass@{k}" to its mean over attempted problems.
    """
    problems = {
        example["task_id"]: example
        for example in load_dataset("openai_humaneval")["test"]
    }

    # Check the generated samples against test suites.
    with ThreadPoolExecutor(max_workers=n_workers) as executor:
        futures = []
        completion_id = Counter()
        n_samples = 0
        results = defaultdict(list)

        print("Reading samples...")
        for sample in tqdm(stream_jsonl(sample_file)):
            task_id = sample["task_id"]
            completion = sample["completion"]
            args = (problems[task_id], completion, timeout, completion_id[task_id])
            future = executor.submit(check_correctness, *args)
            futures.append(future)
            completion_id[task_id] += 1
            n_samples += 1

        if len(completion_id) < len(problems):
            # Fix: restrict scoring to the problems that actually have
            # samples. The old code kept the *first N* problems by dataset
            # order, which silently mis-scored any non-prefix subset.
            print(
                f"Only found {len(completion_id)} solutions, reducing problems from {len(problems)}..."
            )
            problems = {tid: problems[tid] for tid in completion_id}

        assert len(completion_id) == len(problems), "Some problems are not attempted."

        print("Running test suites...")
        for future in tqdm(as_completed(futures), total=len(futures)):
            result = future.result()
            results[result["task_id"]].append((result["completion_id"], result))

    # Calculate pass@k.
    total, correct = [], []
    for result in results.values():
        # Sort by completion_id so per-problem sample order is deterministic.
        result.sort()
        passed = [r[1]["passed"] for r in result]
        total.append(len(passed))
        correct.append(sum(passed))
    total = np.array(total)
    correct = np.array(correct)

    # Fix: use a distinct loop variable instead of shadowing the `k`
    # parameter inside its own comprehension; also dropped the dead
    # `combine_results` helper that was defined but never called.
    pass_at_k = {
        f"pass@{kv}": estimate_pass_at_k(total, correct, kv).mean()
        for kv in k
        if (total >= kv).all()
    }

    return pass_at_k

Casper's avatar
Casper committed
366

Casper's avatar
Casper committed
367
368
369
370
371
372
373
374
def reliability_guard(maximum_memory_bytes: Optional[int] = None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)

    WARNING
    This function is NOT a security sandbox. Untrusted code, including, model-
    generated code, should not be blindly executed outside of one. See the
    Codex paper for more information about OpenAI's code sandbox, and proceed
    with caution.

    :param maximum_memory_bytes: if given, caps the process address space,
        data segment, and (except on macOS) stack via ``resource.setrlimit``.
    """

    import platform, faulthandler

    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(
            resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes)
        )
        resource.setrlimit(
            resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes)
        )
        # NOTE(review): RLIMIT_STACK is skipped on macOS — presumably it
        # cannot be lowered there; kept from the original.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(
                resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes)
            )

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    # Neutralize process-control and filesystem-mutating entry points.
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None  # fix: was assigned twice in the original list
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    import sys

    # Block common debugger/escape-hatch modules from being imported.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None


if __name__ == "__main__":
    os.environ["TOKENIZERS_PARALLELISM"] = "true"

    from awq import AutoAWQForCausalLM

    # Load the quantized model and its tokenizer, then run the benchmark.
    model_path = "TheBloke/zephyr-7B-beta-AWQ"
    model = AutoAWQForCausalLM.from_quantized(
        model_path, device_map="auto", max_seq_len=2048
    )
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    eval_humaneval(model, tokenizer)