"""
Usage:
python -m unittest test_moe_eval_accuracy_large.TestMoEEvalAccuracyLarge.test_mmlu
"""

import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    popen_launch_server,
    write_github_step_summary,
)


class TestMoEEvalAccuracyLarge(CustomTestCase):
    """Accuracy tests for a MoE model served by SGLang with tensor parallelism.

    Launches one shared server process for the whole class, then runs three
    eval suites (MMLU, HumanEval, MGSM-EN) against it and asserts each score
    clears its minimum threshold.
    """

    @classmethod
    def setUpClass(cls):
        # One server is shared by every test in this class; it is killed
        # in tearDownClass.
        cls.model = DEFAULT_MOE_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--log-level-http",
                "warning",
                "--tp",
                "2",  # tensor parallelism across 2 devices
            ],
        )

    @classmethod
    def tearDownClass(cls):
        # Kill the server process and any children it spawned.
        kill_process_tree(cls.process.pid)

    def _run_eval_and_check(self, eval_name, num_examples, min_score):
        """Run one eval suite against the shared server and assert its score.

        Args:
            eval_name: Suite identifier understood by ``run_eval``.
            num_examples: Number of examples to run, or ``None`` for all.
            min_score: Exclusive lower bound the suite score must exceed.

        Returns:
            The metrics dict from ``run_eval`` (has at least ``"score"``),
            so callers can do further reporting.
        """
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name=eval_name,
            num_examples=num_examples,
            num_threads=1024,
        )

        metrics = run_eval(args)
        self.assertGreater(metrics["score"], min_score)
        return metrics

    def test_mmlu(self):
        metrics = self._run_eval_and_check("mmlu", 5000, 0.62)

        if is_in_ci():
            write_github_step_summary(f"### test_mmlu\n" f'{metrics["score"]=:.4f}\n')

    def test_human_eval(self):
        metrics = self._run_eval_and_check("humaneval", None, 0.40)

        if is_in_ci():
            write_github_step_summary(
                f"### test_human_eval\n" f'{metrics["score"]=:.4f}\n'
            )

    def test_mgsm_en(self):
        metrics = self._run_eval_and_check("mgsm_en", None, 0.61)

        if is_in_ci():
            write_github_step_summary(
                f"### test_mgsm_en\n" f'{metrics["score"]=:.4f}\n'
            )


# Allow running this test module directly (e.g. `python test_moe_eval_accuracy_large.py`).
if __name__ == "__main__":
    unittest.main()