import unittest
from types import SimpleNamespace

from sglang.srt.utils import is_hip, kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST,
    DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    popen_launch_server,
)


class TestEvalFP8Accuracy(CustomTestCase):
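    """MMLU accuracy check for the default FP8 model served by a locally launched SGLang server."""
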
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model, cls.base_url, timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_mmlu(self):
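        """Run a 64-example MMLU subset and require the score to stay above the threshold."""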
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
            temperature=0.1,
        )

        metrics = run_eval(args)
        if is_hip():
            # Separate threshold for AMD because its fp8 dtype is different
            self.assertGreaterEqual(metrics["score"], 0.60)
        else:
            self.assertGreaterEqual(metrics["score"], 0.60)


class TestEvalFP8DynamicQuantAccuracy(CustomTestCase):
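    """MMLU accuracy checks comparing offline FP8 checkpoints, online w8a8_fp8 quantization, and an unquantized baseline."""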

    def _run_test(self, model, other_args, expected_score):
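        """Launch a server for `model` with `other_args`, run the MMLU eval, and assert the score is at least `expected_score`."""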
        base_url = DEFAULT_URL_FOR_TEST
        other_args = other_args or []

        process = popen_launch_server(
            model,
            base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=other_args,
        )

        try:
            args = SimpleNamespace(
                base_url=base_url,
                model=model,
                eval_name="mmlu",
                num_examples=64,
                num_threads=32,
                temperature=0.1,
            )

            metrics = run_eval(args)
            self.assertGreaterEqual(metrics["score"], expected_score)
        finally:
            kill_process_tree(process.pid)

    def test_mmlu_offline_only(self):
        """Test with offline quantization only."""
        self._run_test(
            model=DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST,
            other_args=[],
            expected_score=0.64,
        )

    def test_mmlu_offline_and_online_override(self):
        """Test with both offline and online quantization."""
        self._run_test(
            model=DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST,
            other_args=["--quantization", "w8a8_fp8"],
            # inference will use sgl kernel w/ online quant override
            # we observed that the accuracy is higher than with offline quantization only
            expected_score=0.64,
        )

    def test_mmlu_online_only(self):
        """Test with online quantization only."""
        self._run_test(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            # inference will use sgl kernel w/ online quantization only
            # we observed that the accuracy is higher than with offline quantization only
            other_args=["--quantization", "w8a8_fp8"],
            expected_score=0.64,
        )

    def test_mmlu_fp16_baseline(self):
        """Test with unquantized fp16 baseline."""
        self._run_test(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            other_args=[],
            expected_score=0.64,
        )


if __name__ == "__main__":
    unittest.main()