import unittest

from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    get_bool_env_var,
    is_in_ci,
    run_bench_one_batch,
    write_github_step_summary,
)


class TestBenchOneBatch(unittest.TestCase):
    """One-batch benchmark smoke tests.

    Each test launches ``run_bench_one_batch`` with a model and a list of
    server CLI arguments, then — only when running in CI — writes the
    measured throughput to the GitHub Actions step summary and asserts a
    minimum value as a performance-regression gate.  Outside CI the
    benchmark still runs but no threshold is enforced.
    """

    def test_bs1(self):
        """Dense default model, single GPU."""
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_bs1\n"
                f"output_throughput : {output_throughput:.2f} token/s\n"
            )
            # Regression threshold (token/s); tuned to the CI hardware.
            self.assertGreater(output_throughput, 135)

    def test_moe_tp2_bs1(self):
        """MoE model with tensor parallelism across 2 GPUs."""
        output_throughput = run_bench_one_batch(
            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
        )

        # Recorded only for the report: which allreduce backend was active
        # can affect tp>1 throughput, so surface it in the summary line.
        use_vllm_custom_allreduce = get_bool_env_var(
            "USE_VLLM_CUSTOM_ALLREDUCE", default="true"
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_tp2_bs1 ({use_vllm_custom_allreduce=})\n"
                f"output_throughput : {output_throughput:.2f} token/s\n"
            )
            # Regression threshold (token/s); tuned to the CI hardware.
            self.assertGreater(output_throughput, 124)

    def test_torch_compile_tp2_bs1(self):
        """Dense default model, tp=2, with torch.compile enabled."""
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_torch_compile_tp2_bs1\n"
                f"output_throughput : {output_throughput:.2f} token/s\n"
            )
            # Regression threshold (token/s); tuned to the CI hardware.
            self.assertGreater(output_throughput, 235)

# Allow invoking the benchmarks directly (python test_bench_one_batch.py).
if __name__ == "__main__":
    unittest.main()