# test_nightly_vlms_perf.py
import json
import os
import subprocess
import unittest
import warnings

from sglang.bench_one_batch_server import BenchmarkResult, generate_markdown_report
from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    ModelLaunchSettings,
    _parse_int_list_env,
    is_in_ci,
    parse_models,
    popen_launch_server,
    write_github_step_summary,
)

PROFILE_DIR = "performance_profiles_vlms"

MODEL_DEFAULTS = [
    # Keep conservative defaults. Can be overridden by env NIGHTLY_VLM_MODELS
    ModelLaunchSettings(
        "Qwen/Qwen2.5-VL-7B-Instruct",
        extra_args=["--mem-fraction-static=0.7"],
    ),
    ModelLaunchSettings(
        "google/gemma-3-27b-it",
    ),
    ModelLaunchSettings("Qwen/Qwen3-VL-30B-A3B-Instruct", extra_args=["--tp=2"]),
    # "OpenGVLab/InternVL2_5-2B",
    # buggy in official transformers impl
    # "openbmb/MiniCPM-V-2_6",
]


class TestNightlyVLMModelsPerformance(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, message="unclosed.*socket"
        )

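        # Allow overriding the default model list via the NIGHTLY_VLM_MODELS env var.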
        nightly_vlm_models_str = os.environ.get("NIGHTLY_VLM_MODELS")
        if nightly_vlm_models_str:
            cls.models = []
            model_paths = parse_models(nightly_vlm_models_str)
            for model_path in model_paths:
                cls.models.append(ModelLaunchSettings(model_path))
        else:
            cls.models = MODEL_DEFAULTS

        cls.base_url = DEFAULT_URL_FOR_TEST

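        # Benchmark sweep parameters; each can be overridden via the corresponding env var.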
        cls.batch_sizes = _parse_int_list_env("NIGHTLY_VLM_BATCH_SIZES", "1,1,2,8,16")
        cls.input_lens = tuple(_parse_int_list_env("NIGHTLY_VLM_INPUT_LENS", "4096"))
        cls.output_lens = tuple(_parse_int_list_env("NIGHTLY_VLM_OUTPUT_LENS", "512"))
        cls.full_report = f"## {cls.__name__}\n" + BenchmarkResult.help_str()

    def test_bench_one_batch(self):
        all_benchmark_results = []
        all_model_succeed = True

        for model_setup in self.models:
            benchmark_results = []
            with self.subTest(model=model_setup.model_path):
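                # Launch a dedicated server for this model, then benchmark it via a subprocess.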
                process = popen_launch_server(
                    model=model_setup.model_path,
                    base_url=self.base_url,
                    other_args=model_setup.extra_args,
                    timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
                )
                try:
                    # Run bench_one_batch_server against the launched server
                    profile_filename = model_setup.model_path.replace("/", "_")
                    # Profile output path prefix for this run
                    profile_path_prefix = os.path.join(PROFILE_DIR, profile_filename)

                    # JSON output file for this model
                    json_output_file = (
                        f"results_{model_setup.model_path.replace('/', '_')}.json"
                    )

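                    # Build the bench_one_batch_server command; results go to
                    # json_output_file and profiles land under PROFILE_DIR.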
                    command = [
                        "python3",
                        "-m",
                        "sglang.bench_one_batch_server",
                        f"--model={model_setup.model_path}",
                        "--base-url",
                        self.base_url,
                        "--batch-size",
                        *[str(x) for x in self.batch_sizes],
                        "--input-len",
                        *[str(x) for x in self.input_lens],
                        "--output-len",
                        *[str(x) for x in self.output_lens],
                        "--trust-remote-code",
                        "--dataset-name=mmmu",
                        "--profile",
                        "--profile-by-stage",
                        f"--profile-filename-prefix={profile_path_prefix}",
                        "--show-report",
                        f"--output-path={json_output_file}",
                        "--no-append-to-github-summary",
                    ]

                    print(f"Running command: {' '.join(command)}")
                    result = subprocess.run(command, capture_output=True, text=True)

                    if result.returncode != 0:
                        all_model_succeed = False
                        print(
                            f"Error running benchmark for {model_setup.model_path}:"
                        )
                        print(result.stderr)
                        continue

                    print(f"Output for {model_setup.model_path}:")
                    print(result.stdout)

                    # Load and deserialize JSON results
                    if os.path.exists(json_output_file):
                        with open(json_output_file, "r") as f:
                            json_data = json.load(f)

                        # Convert JSON data to BenchmarkResult objects
                        for data in json_data:
                            benchmark_result = BenchmarkResult(**data)
                            all_benchmark_results.append(benchmark_result)
                            benchmark_results.append(benchmark_result)

                        print(
                            f"Loaded {len(benchmark_results)} benchmark results from {json_output_file}"
                        )

                    else:
                        all_model_succeed = False
                        print(f"Warning: JSON output file {json_output_file} not found")

                finally:
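                    # Always shut down the launched server, even if the benchmark failed.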
                    kill_process_tree(process.pid)

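                # Append this model's results to the cumulative markdown report.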
                report_part = generate_markdown_report(
                    PROFILE_DIR,
                    benchmark_results,
                )
                self.full_report += report_part + "\n"

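        # In CI, publish the combined report to the GitHub Actions step summary.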
        if is_in_ci():
            write_github_step_summary(self.full_report)

        if not all_model_succeed:
            raise AssertionError("Some models failed the perf tests.")


if __name__ == "__main__":
    unittest.main()