"""
Usage:
python -m unittest test_eval_accuracy_large.TestEvalAccuracyLarge.test_mmlu
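python -m unittest test_eval_accuracy_large.TestEvalAccuracyLarge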
"""

import os
import time
import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    popen_launch_server,
    write_github_step_summary,
)


class TestEvalAccuracyLarge(CustomTestCase):
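    """End-to-end accuracy checks: launch a server for the default test model
    and score it on MMLU, HumanEval, and MGSM (English)."""
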
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
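        # Launch one shared server for every test in this class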
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
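            # Keep per-request HTTP logging quiet during the long eval runs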
            other_args=["--log-level-http", "warning"],
        )

    @classmethod
    def tearDownClass(cls):
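        # Stop the server along with any child processes it spawned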
        kill_process_tree(cls.process.pid)

    def tearDown(self):
        # Delay between tests to allow GPU memory cleanup
        if os.getenv("SGLANG_AMD_CI") == "1":
            time.sleep(180)

    def test_mmlu(self):
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=5000,
            num_threads=1024,
        )

        metrics = run_eval(args)

        if is_in_ci():
            write_github_step_summary(f"### test_mmlu\n" f'{metrics["score"]=:.4f}\n')

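        # Regression guard: MMLU accuracy should stay above this threshold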
        self.assertGreater(metrics["score"], 0.70)

    def test_human_eval(self):
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="humaneval",
            num_examples=None,
            num_threads=1024,
        )

        metrics = run_eval(args)

        if is_in_ci():
            write_github_step_summary(
                f"### test_human_eval\n" f'{metrics["score"]=:.4f}\n'
            )

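        # Regression guard for the HumanEval score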
        self.assertGreater(metrics["score"], 0.64)

    def test_mgsm_en(self):
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mgsm_en",
            num_examples=None,
            num_threads=1024,
        )

        metrics = run_eval(args)

        if is_in_ci():
            write_github_step_summary(
                f"### test_mgsm_en\n" f'{metrics["score"]=:.4f}\n'
            )

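        # Regression guard for MGSM (English) accuracy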
        self.assertGreater(metrics["score"], 0.835)


if __name__ == "__main__":
    unittest.main()