test_bench_one_batch.py 2.09 KB
Newer Older
1
2
3
4
5
import unittest

from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
6
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
7
    CustomTestCase,
8
    is_in_ci,
9
    run_bench_offline_throughput,
10
    run_bench_one_batch,
11
    write_github_step_summary,
12
13
)

14
15
16
# We use `run_bench_offline_throughput` instead of `run_bench_one_batch` for most cases
# because `run_bench_offline_throughput` has the overlap scheduler enabled.

17

18
class TestBenchOneBatch(CustomTestCase):
    """Throughput smoke tests for single-batch / offline benchmarks.

    Each test runs a benchmark helper against a reference model, asserts a
    minimum decode throughput (token/s), and — when running in CI — writes
    the measured number to the GitHub step summary for tracking.
    """

    def test_bs1_small(self):
        # Small model: use `run_bench_one_batch` directly (no overlap
        # scheduler needed for this quick sanity check).
        output_throughput = run_bench_one_batch(
            DEFAULT_SMALL_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )
        self.assertGreater(output_throughput, 50)

    def test_bs1_default(self):
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_bs1_default (llama-3.1-8b)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )
            self.assertGreater(output_throughput, 135)

    def test_moe_tp2_bs1(self):
        # MoE model with tensor parallelism across 2 GPUs.
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_tp2_bs1 (Mixtral-8x7B)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )
            self.assertGreater(output_throughput, 125)

    def test_torch_compile_tp2_bs1(self):
        # Dense default model with torch.compile + tp=2.
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                # Bug fix: the summary was mislabeled "(Mixtral-8x7B)". This
                # test runs DEFAULT_MODEL_NAME_FOR_TEST — the same model that
                # test_bs1_default labels "(llama-3.1-8b)".
                f"### test_torch_compile_tp2_bs1 (llama-3.1-8b)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )
            self.assertGreater(output_throughput, 220)
62

63
64
65

if __name__ == "__main__":
    # Discover and run all test cases in this module when executed directly.
    unittest.main()