vllm_hf_equiv.py

import argparse
from typing import Dict, Tuple

import numpy as np
import scipy.stats

import lm_eval.evaluator
import lm_eval.utils

eval_logger = lm_eval.utils.eval_logger
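
# Compare a model run through the lm-evaluation-harness "hf" backend with the
# same model run through the "vllm" backend: evaluate both on the same tasks,
# then check per-task that the accuracies are statistically indistinguishable
# (two-tailed z-test, alpha = 0.05).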


def calculate_z_value(res1: Dict, res2: Dict) -> Tuple[float, float]:
    """Two-tailed z-test on the accuracy difference between two result dicts."""
    acc1, acc2 = res1["acc,none"], res2["acc,none"]
    st_err1, st_err2 = res1["acc_stderr,none"], res2["acc_stderr,none"]
    # acc_stderr is already the standard error of the mean, so the variances
    # are simply summed; do not divide by the sample count again.
    Z = (acc1 - acc2) / np.sqrt(st_err1**2 + st_err2**2)
    p_value = 2 * scipy.stats.norm.sf(abs(Z))  # two-tailed test
    return Z, p_value
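
# Worked example (hypothetical numbers): accuracies 0.62 vs 0.60 with a
# standard error of 0.015 each give Z ≈ 0.94 and p ≈ 0.35, so the two runs
# would not be flagged as significantly different at alpha = 0.05.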


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained",
        default="EleutherAI/pythia-70m",
        help="name of the model to compare",
    )
    parser.add_argument(
        "--hf_args", default="", help="extra HF model args, comma-separated <arg>=<value>"
    )
    parser.add_argument(
        "--vllm_args", default="", help="extra vLLM model args, comma-separated <arg>=<value>"
    )
    parser.add_argument("--tasks", type=str, default="arc_easy,hellaswag")
    parser.add_argument(
        "--samples",
        type=int,
        default=30,
        help="number of examples per task (passed to simple_evaluate as limit)",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cuda",
    )
    parser.add_argument(
        "--batch",
        default="auto",
        help="batch size (passed to simple_evaluate as batch_size)",
    )
    parser.add_argument(
        "--verbosity",
        type=str,
        default="INFO",
        help="logging verbosity",
    )
    return parser.parse_args()
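
# Example invocation (illustrative only; assumes a CUDA device and that both
# backends accept a `dtype` model arg):
#   python vllm_hf_equiv.py --pretrained EleutherAI/pythia-70m \
#       --hf_args dtype=float32 --vllm_args dtype=float32 --samples 100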


if __name__ == "__main__":
    args = parse_args()
    eval_logger.setLevel(args.verbosity)

    # Build the model_args strings, appending any backend-specific overrides.
    hf_model_args = f"pretrained={args.pretrained}"
    if args.hf_args:
        hf_model_args += f",{args.hf_args}"
    vllm_model_args = f"pretrained={args.pretrained}"
    if args.vllm_args:
        vllm_model_args += f",{args.vllm_args}"

    results_hf = lm_eval.evaluator.simple_evaluate(
        model="hf",
        model_args=hf_model_args,
        tasks=args.tasks.split(","),
        limit=args.samples,
        device=args.device,
        batch_size=args.batch,
    )
    results_vllm = lm_eval.evaluator.simple_evaluate(
        model="vllm",
        model_args=vllm_model_args,
        tasks=args.tasks.split(","),
        limit=args.samples,
        device=args.device,
        batch_size=args.batch,
    )

    all_res = {}
    for (task1, res1), (task2, res2) in zip(
        results_hf["results"].items(), results_vllm["results"].items()
    ):
        assert task1 == task2, f"task mismatch: {task1} vs {task2}"
        z, p_value = calculate_z_value(res1, res2)
        all_res[task1] = {"z": z, "p_value": p_value}
        # The backends are considered equivalent only if no task shows a
        # statistically significant accuracy difference.
        assert p_value > 0.05, f"{task1}: HF and vLLM accuracies differ (p={p_value:.4f})"
    eval_logger.info(all_res)