import unittest

from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    run_bench_one_batch,
    write_github_step_summary,
)
13
class TestBenchOneBatch(CustomTestCase):
14
    def test_bs1_default(self):
15
16
17
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )
18

19
        if is_in_ci():
20
            write_github_step_summary(
21
22
                f"### test_bs1_default (llama-3.1-8b)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
23
            )
24
            self.assertGreater(output_throughput, 135)
25

26
    def test_moe_tp2_bs1(self):
27
        output_throughput = run_bench_one_batch(
28
            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
29
30
        )

31
        if is_in_ci():
32
            write_github_step_summary(
Lianmin Zheng's avatar
Lianmin Zheng committed
33
                f"### test_moe_tp2_bs1\n"
34
                f"output_throughput: {output_throughput:.2f} token/s\n"
35
            )
36
            self.assertGreater(output_throughput, 125)
37

38
39
40
41
42
43
44
45
46
    def test_torch_compile_tp2_bs1(self):
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_torch_compile_tp2_bs1\n"
47
                f"output_throughput: {output_throughput:.2f} token/s\n"
48
            )
49
            self.assertGreater(output_throughput, 220)
50

51
52
53

if __name__ == "__main__":
    unittest.main()