"sgl-kernel/vscode:/vscode.git/clone" did not exist on "71d1785f2d0a4424f53caa9f5fa4adcb9a195e30"
test_eval_accuracy_large.py 1.83 KB
Newer Older
1
2
3
4
5
"""
Usage:
python -m unittest test_eval_accuracy_large.TestEvalAccuracyLarge.test_mmlu
"""

6
7
8
import unittest
from types import SimpleNamespace

9
from sglang.srt.utils import kill_process_tree
10
11
12
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
13
14
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
15
16
17
18
19
20
21
22
    popen_launch_server,
)


class TestEvalAccuracyLarge(unittest.TestCase):
    """Accuracy regression tests against a locally launched server.

    One server process is started for the whole class (expensive launch),
    then each test runs a full eval suite through ``run_eval`` and asserts
    the score stays above a known-good floor.
    """

    @classmethod
    def setUpClass(cls):
        """Launch the model server once for all tests in this class."""
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            # Silence per-request HTTP access logs so eval progress output
            # stays readable.
            other_args=["--log-level-http", "warning"],
        )

    @classmethod
    def tearDownClass(cls):
        """Shut down the server and all of its child worker processes."""
        # kill_process_tree (not a plain terminate) because the server
        # spawns subprocesses that would otherwise be orphaned.
        kill_process_tree(cls.process.pid)

    def test_mmlu(self):
        """MMLU (5000 examples): score must exceed 0.71."""
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=5000,
            num_threads=1024,
        )

        metrics = run_eval(args)
        self.assertGreater(metrics["score"], 0.71)

    def test_human_eval(self):
        """HumanEval (full set): score must exceed 0.64."""
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="humaneval",
            # None means run every example in the benchmark.
            num_examples=None,
            num_threads=1024,
        )

        metrics = run_eval(args)
        self.assertGreater(metrics["score"], 0.64)

    def test_mgsm_en(self):
        """MGSM English (full set): score must exceed 0.835."""
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mgsm_en",
            # None means run every example in the benchmark.
            num_examples=None,
            num_threads=1024,
        )

        metrics = run_eval(args)
        self.assertGreater(metrics["score"], 0.835)



# Standard entry point: allows running this file directly with
# `python test_eval_accuracy_large.py` in addition to `python -m unittest`.
if __name__ == "__main__":
    unittest.main()