import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_child_process
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_URL_FOR_ACCURACY_TEST,
    popen_launch_server,
)


class TestEvalAccuracyLargeChunkedPrefill(unittest.TestCase):
    """Accuracy evals (MMLU, HumanEval, MGSM-EN) against a server launched
    with a small chunked-prefill size, to verify that chunked prefill does
    not degrade model accuracy.
    """

    @classmethod
    def setUpClass(cls):
        # One shared server for all tests in this class.
        # --chunked-prefill-size 256 forces long prompts to be prefilled in
        # small chunks, which is the behavior under test.
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_ACCURACY_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=300,
            other_args=["--log-level-http", "warning", "--chunked-prefill-size", "256"],
        )

    @classmethod
    def tearDownClass(cls):
        # The launched server spawns children; kill the whole tree so the
        # port is freed for subsequent test classes.
        kill_child_process(cls.process.pid)

    def _run_eval_and_check(self, eval_name, num_examples, min_score):
        """Run one eval suite against the shared server and fail the test
        if the reported score falls below *min_score*.

        Args:
            eval_name: Name of the eval suite understood by ``run_eval``.
            num_examples: Number of examples to evaluate, or ``None`` for all.
            min_score: Minimum acceptable value of ``metrics["score"]``.
        """
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name=eval_name,
            num_examples=num_examples,
            num_threads=1024,
        )

        metrics = run_eval(args)
        # Use a unittest assertion rather than a bare `assert`, which would
        # be stripped when Python runs with -O.
        self.assertGreaterEqual(metrics["score"], min_score, f"{metrics}")

    def test_mmlu(self):
        self._run_eval_and_check("mmlu", 3000, 0.705)

    def test_human_eval(self):
        self._run_eval_and_check("humaneval", None, 0.64)

    def test_mgsm_en(self):
        self._run_eval_and_check("mgsm_en", None, 0.84)


if __name__ == "__main__":
    # Allow running this file directly: discovers and runs the tests above.
    unittest.main()