import time
import unittest
from types import SimpleNamespace

import requests

from sglang.srt.utils import is_cuda, kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST_BASE,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    popen_launch_server,
)


class TestTorchCompileMoe(CustomTestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST_BASE
        cls.base_url = DEFAULT_URL_FOR_TEST
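        # Launch the test server with torch.compile enabled; --torch-compile-max-bs
        # limits compilation to batch sizes up to 4.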
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--enable-torch-compile", "--torch-compile-max-bs", "4"],
        )

    @classmethod
    def tearDownClass(cls):
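        # Shut down the launched server and all of its child processes.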
        kill_process_tree(cls.process.pid)

    def test_mmlu(self):
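        # Run a small MMLU eval (64 examples) against the compiled server and
        # require a minimum accuracy score.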
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
        )

        metrics = run_eval(args)
        self.assertGreaterEqual(metrics["score"], 0.50)

    def run_decode(self, max_new_tokens):
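        # Helper: issue one greedy /generate request that decodes a fixed
        # number of tokens (ignore_eos keeps decoding past EOS).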
        response = requests.post(
            self.base_url + "/generate",
            json={
                "text": "The capital of France is",
                "sampling_params": {
                    "temperature": 0,
                    "max_new_tokens": max_new_tokens,
                    "ignore_eos": True,
                },
            },
        )
        return response.json()

    def test_throughput(self):
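        # Measure single-request decode throughput (tokens/s) and compare it
        # against a rough per-backend lower bound.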
        # Warmup
        res = self.run_decode(16)

        max_tokens = 256
        tic = time.perf_counter()
        res = self.run_decode(max_tokens)
        tok = time.perf_counter()
        print(f"{res=}")
        throughput = max_tokens / (tok - tic)
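        # The CUDA path is expected to clear a slightly higher throughput floor.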
        if is_cuda():
            self.assertGreaterEqual(throughput, 285)
        else:
            self.assertGreaterEqual(throughput, 270)


if __name__ == "__main__":
    unittest.main()