import os
import unittest

from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    run_bench_offline_throughput,
    run_bench_one_batch,
    write_github_step_summary,
)

# We use `run_bench_offline_throughput` instead of `run_bench_one_batch` for most
# cases because `run_bench_offline_throughput` has the overlap scheduler.

class TestBenchOneBatch(CustomTestCase):
    """Throughput smoke tests for the offline benchmarking entry points.

    Each test launches a benchmark run, and (in CI) writes the measured
    decode throughput to the GitHub step summary and asserts it clears a
    hardware-dependent floor.
    """

    def test_bs1_small(self):
        """Batch-size-1 sanity check on the small model (no overlap scheduler)."""
        output_throughput = run_bench_one_batch(
            DEFAULT_SMALL_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )
        self.assertGreater(output_throughput, 50)

    def test_bs1_default(self):
        """Batch-size-1 throughput on the default (llama-3.1-8b) model."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )

        # Threshold is only asserted in CI, where the hardware is known.
        if is_in_ci():
            write_github_step_summary(
                f"### test_bs1_default (llama-3.1-8b)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )
            self.assertGreater(output_throughput, 135)

    def test_moe_tp2_bs1(self):
        """Batch-size-1 throughput on the MoE model with tensor parallelism 2."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_tp2_bs1 (Mixtral-8x7B)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )

            # AMD CI runners use a lower throughput floor than the default ones.
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(output_throughput, 85)
            else:
                self.assertGreater(output_throughput, 125)

    def test_torch_compile_tp2_bs1(self):
        """Batch-size-1 throughput with torch.compile enabled and tp=2."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
        )

        if is_in_ci():
            # Fixed label: this test benchmarks DEFAULT_MODEL_NAME_FOR_TEST
            # (llama-3.1-8b, same as test_bs1_default), not Mixtral — the old
            # "(Mixtral-8x7B)" label was copy-pasted from test_moe_tp2_bs1.
            write_github_step_summary(
                f"### test_torch_compile_tp2_bs1 (llama-3.1-8b)\n"
                f"output_throughput: {output_throughput:.2f} token/s\n"
            )

            # Same AMD-vs-default CI split as test_moe_tp2_bs1.
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(output_throughput, 200)
            else:
                self.assertGreater(output_throughput, 220)

# Allow running this test module directly: `python test_bench_one_batch.py`.
if __name__ == "__main__":
    unittest.main()