import os
import unittest

from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_amd_ci,
    is_in_ci,
    run_bench_offline_throughput,
    run_bench_one_batch,
    write_github_step_summary,
)

# We use `run_bench_offline_throughput` instead of `run_bench_one_batch` for most cases
# because `run_bench_offline_throughput` has an overlap scheduler.


class TestBenchOneBatch(CustomTestCase):
    """Throughput smoke tests for single-batch / offline benchmark runs.

    Each test launches a benchmark against a reference model and asserts the
    measured decode throughput (token/s) stays above a per-hardware floor.
    In CI the measured number is also appended to the GitHub step summary.
    """

    def _write_summary(self, title: str, output_throughput: float) -> None:
        """Append a throughput report line for *title* to the GitHub step summary."""
        write_github_step_summary(
            f"### {title}\n"
            f"output_throughput: {output_throughput:.2f} token/s\n"
        )

    def test_bs1_small(self):
        """Batch-size-1 sanity check on the small model via `run_bench_one_batch`."""
        output_throughput = run_bench_one_batch(
            DEFAULT_SMALL_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )
        # Asserted unconditionally (not only in CI), unlike the tests below.
        self.assertGreater(output_throughput, 50)

    def test_bs1_default(self):
        """Batch-size-1 offline throughput on the default (llama-3.1-8b) model."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            self._write_summary("test_bs1_default (llama-3.1-8b)", output_throughput)
            self.assertGreater(output_throughput, 135)

    def test_moe_tp2_bs1(self):
        """Batch-size-1 offline throughput for the MoE model with tensor parallelism 2."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
        )

        if is_in_ci():
            self._write_summary("test_moe_tp2_bs1 (Mixtral-8x7B)", output_throughput)
            # AMD CI runs on different hardware, so it gets a lower floor.
            if is_in_amd_ci():
                self.assertGreater(output_throughput, 85)
            else:
                self.assertGreater(output_throughput, 125)

    def test_torch_compile_tp2_bs1(self):
        """Batch-size-1 offline throughput with torch.compile enabled, tp=2."""
        output_throughput = run_bench_offline_throughput(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
        )

        if is_in_ci():
            # Fix: this test runs DEFAULT_MODEL_NAME_FOR_TEST (llama-3.1-8b),
            # not Mixtral — the previous "(Mixtral-8x7B)" label was a
            # copy-paste error from test_moe_tp2_bs1 and mislabeled CI reports.
            self._write_summary(
                "test_torch_compile_tp2_bs1 (llama-3.1-8b)", output_throughput
            )
            # AMD CI runs on different hardware, so it gets a lower floor.
            if is_in_amd_ci():
                self.assertGreater(output_throughput, 200)
            else:
                self.assertGreater(output_throughput, 220)


# Allow running this test module directly; unittest discovers and runs the
# TestBenchOneBatch cases defined above.
if __name__ == "__main__":
    unittest.main()