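"""Integration tests for the sglang router (sglang_router.launch_server).

Covers: router launch with an MMLU accuracy check, adding/removing workers at
runtime, lazy fault tolerance when a worker dies mid-run, payload-size limits,
and API-key authentication.
"""
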
import socket
import subprocess
import threading
import time
import unittest
from types import SimpleNamespace

import requests

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
)


def popen_launch_router(
    model: str,
    base_url: str,
    dp_size: int,
    timeout: float,
    policy: str = "cache_aware",
    max_payload_size: int = None,
    api_key: str = None,
    log_dir: str = None,
    service_discovery: bool = False,
    selector: list = None,
    service_discovery_port: int = 80,
    service_discovery_namespace: str = None,
):
    """
    Launch the router server process.

    Args:
        model: Model path/name
        base_url: Server base URL
        dp_size: Data parallel size
        timeout: Server launch timeout
        policy: Router policy, one of "cache_aware", "round_robin", "random"
        max_payload_size: Maximum payload size in bytes
        api_key: API key for the router
        log_dir: Directory to store log files. If None, logs are only output to console.
        service_discovery: Enable Kubernetes service discovery
        selector: List of label selectors in format ["key1=value1", "key2=value2"]
        service_discovery_port: Port to use for service discovery
        service_discovery_namespace: Kubernetes namespace to watch for pods. If None, watches all namespaces.
    """
    _, host, port = base_url.split(":")
    host = host[2:]

    command = [
        "python3",
        "-m",
        "sglang_router.launch_server",
        "--model-path",
        model,
        "--host",
        host,
        "--port",
        port,
        "--dp",
        str(dp_size),
        "--router-eviction-interval",
        "5",
        "--router-policy",
        policy,
    ]

    if api_key is not None:
        command.extend(["--api-key", api_key])

    if max_payload_size is not None:
        command.extend(["--router-max-payload-size", str(max_payload_size)])

    if service_discovery:
        command.append("--router-service-discovery")

    if selector:
        command.extend(["--router-selector"] + selector)

    if service_discovery_port != 80:
        command.extend(["--router-service-discovery-port", str(service_discovery_port)])

    if service_discovery_namespace:
        command.extend(
            ["--router-service-discovery-namespace", service_discovery_namespace]
        )

    if log_dir is not None:
        command.extend(["--log-dir", log_dir])

    # stdout/stderr are left as None so the router process inherits this
    # process's streams and its logs show up in the test output
    process = subprocess.Popen(command, stdout=None, stderr=None)

    start_time = time.perf_counter()
    with requests.Session() as session:
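        # Poll /health until the router reports healthy or the timeout expires.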
        while time.perf_counter() - start_time < timeout:
            try:
                response = session.get(f"{base_url}/health")
                if response.status_code == 200:
                    print(f"Router {base_url} is healthy")
                    return process
            except requests.RequestException:
                pass
            time.sleep(10)

    raise TimeoutError("Router failed to start within the timeout period.")


def find_available_port():
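    # Bind to port 0 so the OS picks a free ephemeral port, then return that port.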
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]


def popen_launch_server(
    model: str,
    base_url: str,
    timeout: float,
):
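    """Launch a standalone sglang worker (added to the router via /add_worker in tests)."""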
    _, host, port = base_url.split(":")
    host = host[2:]

    command = [
        "python3",
        "-m",
        "sglang.launch_server",
        "--model-path",
        model,
        "--host",
        host,
        "--port",
        port,
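        # --base-gpu-id 1 places this worker on GPU 1, keeping it off the GPU
        # used by the router-launched worker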
        "--base-gpu-id",
        "1",
    ]

    process = subprocess.Popen(command, stdout=None, stderr=None)

    # Intentionally do not wait for the worker to become healthy here; readiness
    # is deferred to the router's health check.
    return process


def terminate_and_wait(process, timeout=300):
    """Terminate a process and wait until it is terminated.

    Args:
        process: subprocess.Popen object
        timeout: maximum time to wait in seconds

    Raises:
        TimeoutError: if process does not terminate within timeout
    """
    if process is None:
        return

    process.terminate()
    start_time = time.perf_counter()

    while process.poll() is None:
        print(f"Terminating process {process.pid}")
        if time.perf_counter() - start_time > timeout:
            raise TimeoutError(
                f"Process {process.pid} failed to terminate within {timeout}s"
            )
        time.sleep(1)

    print(f"Process {process.pid} is successfully terminated")


class TestLaunchServer(unittest.TestCase):
    def setUp(self):
        self.model = DEFAULT_MODEL_NAME_FOR_TEST
        self.base_url = DEFAULT_URL_FOR_TEST
        self.process = None
        self.other_process = []  # worker processes started by tests; terminated in tearDown

    def tearDown(self):
        print("Running tearDown...")
        if self.process:
            terminate_and_wait(self.process)
        for process in self.other_process:
            terminate_and_wait(process)
        print("tearDown done")

    def test_1_mmlu(self):
        print("Running test_1_mmlu...")
        # DP size = 2
        self.process = popen_launch_router(
            self.model,
            self.base_url,
            dp_size=2,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            policy="cache_aware",
        )

        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
            temperature=0.1,
        )

        metrics = run_eval(args)
        score = metrics["score"]
        THRESHOLD = 0.65
        passed = score >= THRESHOLD
        msg = f"MMLU test {'passed' if passed else 'failed'} with score {score:.3f} (threshold: {THRESHOLD})"
        self.assertGreaterEqual(score, THRESHOLD, msg)

    def test_2_add_and_remove_worker(self):
        print("Running test_2_add_and_remove_worker...")
        # DP size = 1
        self.process = popen_launch_router(
            self.model,
            self.base_url,
            dp_size=1,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            policy="round_robin",  # use round robin to make sure every worker processes requests
        )
        # 1. start a worker
        port = find_available_port()
        worker_url = f"http://127.0.0.1:{port}"
        worker_process = popen_launch_server(
            self.model, worker_url, DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH
        )
        self.other_process.append(worker_process)

        # 2. use the /add_worker api to add it to the router. It will be used by the router after it is healthy
        with requests.Session() as session:
            response = session.post(f"{self.base_url}/add_worker?url={worker_url}")
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(response.status_code, 200)

        # 3. run mmlu
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
            temperature=0.1,
        )
        metrics = run_eval(args)
        score = metrics["score"]
        THRESHOLD = 0.65
        passed = score >= THRESHOLD
        msg = f"MMLU test {'passed' if passed else 'failed'} with score {score:.3f} (threshold: {THRESHOLD})"
        self.assertGreaterEqual(score, THRESHOLD, msg)

        # 4. use /remove_worker api to remove it from the router
        with requests.Session() as session:
            response = session.post(f"{self.base_url}/remove_worker?url={worker_url}")
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(response.status_code, 200)

        # 5. run mmlu again
        metrics = run_eval(args)
        score = metrics["score"]
        THRESHOLD = 0.65
        passed = score >= THRESHOLD
        msg = f"MMLU test {'passed' if passed else 'failed'} with score {score:.3f} (threshold: {THRESHOLD})"
        self.assertGreaterEqual(score, THRESHOLD, msg)

    def test_3_lazy_fault_tolerance(self):
        print("Running test_3_lazy_fault_tolerance...")
        # DP size = 1
        self.process = popen_launch_router(
            self.model,
            self.base_url,
            dp_size=1,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            policy="round_robin",
        )

        # 1. start a worker
        port = find_available_port()
        worker_url = f"http://127.0.0.1:{port}"
        worker_process = popen_launch_server(
            self.model, worker_url, DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH
        )
        self.other_process.append(worker_process)

        # 2. use the /add_worker api to add it to the router. It will be used by the router after it is healthy
        with requests.Session() as session:
            response = session.post(f"{self.base_url}/add_worker?url={worker_url}")
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(response.status_code, 200)

        # Start a thread to kill the worker after 10 seconds to mimic abrupt worker failure
        def kill_worker():
            time.sleep(10)
            kill_process_tree(worker_process.pid)
            print("Worker process killed")

        kill_thread = threading.Thread(target=kill_worker)
        kill_thread.daemon = True
        kill_thread.start()

        # 3. run mmlu
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=256,
            num_threads=32,
            temperature=0.1,
        )
        metrics = run_eval(args)
        score = metrics["score"]
        THRESHOLD = 0.65
        passed = score >= THRESHOLD
        msg = f"MMLU test {'passed' if passed else 'failed'} with score {score:.3f} (threshold: {THRESHOLD})"
        self.assertGreaterEqual(score, THRESHOLD, msg)

    def test_4_payload_size(self):
        print("Running test_4_payload_size...")
        # Start router with a 1MB payload limit
        self.process = popen_launch_router(
            self.model,
            self.base_url,
            dp_size=1,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            policy="round_robin",
            max_payload_size=1 * 1024 * 1024,  # 1MB limit
        )

        # Test case 1: Payload under the 1MB limit should succeed
        payload_0_5_mb = {
            "text": "x" * int(0.5 * 1024 * 1024),  # 0.5MB of text
            "temperature": 0.0,
        }

        with requests.Session() as session:
            response = session.post(
                f"{self.base_url}/generate",
                json=payload_0_5_mb,
                headers={"Content-Type": "application/json"},
            )
            self.assertEqual(
                response.status_code,
                200,
                f"0.5MB payload should succeed but got status {response.status_code}",
            )

        # Test case 2: Payload over 1MB should fail
        payload_1_plus_mb = {
            "text": "x" * int((1.2 * 1024 * 1024)),  # 1.2MB of text
            "temperature": 0.0,
        }

        with requests.Session() as session:
            response = session.post(
                f"{self.base_url}/generate",
                json=payload_1_plus_mb,
                headers={"Content-Type": "application/json"},
            )
            self.assertEqual(
                response.status_code,
                413,  # Payload Too Large
                f"1.2MB payload should fail with 413 but got status {response.status_code}",
            )

    def test_5_api_key(self):
        print("Running test_5_api_key...")

        self.process = popen_launch_router(
            self.model,
            self.base_url,
            dp_size=1,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            policy="round_robin",
            api_key="correct_api_key",
        )

        # Test case 1: request without api key should fail
        with requests.Session() as session:
            response = session.post(
                f"{self.base_url}/generate",
                json={"text": "Kanye west is, ", "temperature": 0},
            )
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(
                response.status_code,
                401,
                "Request without api key should fail with 401",
            )

        # Test case 2: request with invalid api key should fail
        with requests.Session() as session:
            response = session.post(
                f"{self.base_url}/generate",
                json={"text": "Kanye west is, ", "temperature": 0},
                headers={"Authorization": "Bearer 123"},
            )
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(
                response.status_code,
                401,
                "Request with invalid api key should fail with 401",
            )

        # Test case 3: request with correct api key should succeed
        with requests.Session() as session:
            response = session.post(
                f"{self.base_url}/generate",
                json={"text": "Kanye west is ", "temperature": 0},
                headers={"Authorization": "Bearer correct_api_key"},
            )
            print(f"status code: {response.status_code}, response: {response.text}")
            self.assertEqual(
                response.status_code, 200, "Request with correct api key should succeed"
            )


if __name__ == "__main__":
    unittest.main()