"""
The entry point of the inference server.
SRT = SGLang Runtime.
"""

import asyncio
import dataclasses
import json
import logging
import multiprocessing as mp
import os
import sys
import threading
import time
from http import HTTPStatus
from typing import Optional, Dict

# Fix a bug of Python threading
setattr(threading, "_register_atexit", lambda *args, **kwargs: None)

import aiohttp
import psutil
import requests
import uvicorn
import uvloop
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, Response, StreamingResponse

from sglang.backend.runtime_endpoint import RuntimeEndpoint
from sglang.srt.constrained import disable_cache
from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.srt.managers.detokenizer_manager import start_detokenizer_process
from sglang.srt.managers.io_struct import GenerateReqInput
from sglang.srt.managers.controller.manager_single import (
    start_controller_process as start_controller_process_single,
)
from sglang.srt.managers.controller.manager_multi import (
    start_controller_process as start_controller_process_multi,
)
from sglang.srt.managers.tokenizer_manager import TokenizerManager
from sglang.srt.openai_api_adapter import (
    load_chat_template_for_openai_api,
    v1_chat_completions,
    v1_completions,
)
from sglang.srt.server_args import ModelPortArgs, PortArgs, ServerArgs
from sglang.srt.utils import (
    API_KEY_HEADER_NAME,
    APIKeyValidatorMiddleware,
    allocate_init_ports,
    assert_pkg_version,
    enable_show_time_cost,
)
from sglang.utils import get_exception_traceback

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())


app = FastAPI()
tokenizer_manager = None


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/get_model_info")
async def get_model_info():
    result = {
        "model_path": tokenizer_manager.model_path,
    }
    return result


@app.get("/get_server_args")
async def get_server_args():
    return dataclasses.asdict(tokenizer_manager.server_args)


@app.get("/flush_cache")
async def flush_cache():
    tokenizer_manager.flush_cache()
    return Response(
        content="Cache flushed.\nPlease check backend logs for more details. "
        "(When there are running or waiting requests, the operation will not be performed.)\n",
        status_code=200,
    )


async def generate_request(obj: GenerateReqInput, request: Request):
    """Handle a generate request, in streaming or non-streaming mode."""
    if obj.stream:

        async def stream_results():
            try:
                async for out in tokenizer_manager.generate_request(obj, request):
                    yield f"data: {json.dumps(out, ensure_ascii=False)}\n\n"
            except ValueError as e:
                out = {"error": {"message": str(e)}}
                yield f"data: {json.dumps(out, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            stream_results(),
            media_type="text/event-stream",
            background=tokenizer_manager.create_abort_task(obj),
        )
    else:
        try:
            ret = await tokenizer_manager.generate_request(obj, request).__anext__()
            return ret
        except ValueError as e:
            return JSONResponse(
                {"error": {"message": str(e)}}, status_code=HTTPStatus.BAD_REQUEST
            )


app.post("/generate")(generate_request)
app.put("/generate")(generate_request)


@app.post("/v1/completions")
async def openai_v1_completions(raw_request: Request):
    return await v1_completions(tokenizer_manager, raw_request)


@app.post("/v1/chat/completions")
async def openai_v1_chat_completions(raw_request: Request):
    return await v1_chat_completions(tokenizer_manager, raw_request)


def launch_server(server_args: ServerArgs, pipe_finish_writer, model_overide_args=None):
    """Launch the SRT server: the controller and detokenizer processes plus the FastAPI app."""
    global tokenizer_manager

    logging.basicConfig(
        level=getattr(logging, server_args.log_level.upper()),
        format="%(message)s",
    )

    # Set global environments
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    if server_args.show_time_cost:
        enable_show_time_cost()
    if server_args.disable_disk_cache:
        disable_cache()
    if server_args.enable_flashinfer:
        assert_pkg_version("flashinfer", "0.0.4")
    if server_args.chat_template:
        # TODO: replace this with huggingface transformers template
        load_chat_template_for_openai_api(server_args.chat_template)

    # Allocate ports
    server_args.port, server_args.additional_ports = allocate_init_ports(
        server_args.port,
        server_args.additional_ports,
        server_args.tp_size,
        server_args.dp_size,
    )
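
    # Port layout in `additional_ports` (inferred from the indexing below):
    # ports[0:3] are the tokenizer, router, and detokenizer ports; each
    # data-parallel replica i then owns a block of (1 NCCL port + tp_size
    # model ports). For example, with tp_size=2 and dp_size=2, replica 0 uses
    # ports[3:6] and replica 1 uses ports[6:9].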
    ports = server_args.additional_ports
    tp = server_args.tp_size
    model_port_args = []
    for i in range(server_args.dp_size):
        model_port_args.append(
            ModelPortArgs(
                nccl_port=ports[3 + i * (tp + 1)],
                model_tp_ports=ports[3 + i * (tp + 1) + 1 : 3 + (i + 1) * (tp + 1)],
            )
        )
    port_args = PortArgs(
        tokenizer_port=ports[0],
        router_port=ports[1],
        detokenizer_port=ports[2],
        model_port_args=model_port_args,
    )

    # Launch processes
    tokenizer_manager = TokenizerManager(server_args, port_args, model_overide_args)
    pipe_router_reader, pipe_router_writer = mp.Pipe(duplex=False)
    pipe_detoken_reader, pipe_detoken_writer = mp.Pipe(duplex=False)

    if server_args.dp_size == 1:
        start_process = start_controller_process_single
    else:
        start_process = start_controller_process_multi
    proc_router = mp.Process(
        target=start_process,
        args=(server_args, port_args, pipe_router_writer, model_overide_args),
    )
    proc_router.start()
    proc_detoken = mp.Process(
        target=start_detokenizer_process,
        args=(
            server_args,
            port_args,
            pipe_detoken_writer,
        ),
    )
    proc_detoken.start()

    # Wait for the model to finish loading
    router_init_state = pipe_router_reader.recv()
    detoken_init_state = pipe_detoken_reader.recv()

    if router_init_state != "init ok" or detoken_init_state != "init ok":
        proc_router.kill()
        proc_detoken.kill()
        print(
            f"Initialization failed. router_init_state: {router_init_state}", flush=True
        )
        print(
            f"Initialization failed. detoken_init_state: {detoken_init_state}",
            flush=True,
        )
        sys.exit(1)
    assert proc_router.is_alive() and proc_detoken.is_alive()

    if server_args.api_key:
        app.add_middleware(APIKeyValidatorMiddleware, api_key=server_args.api_key)

    # Send a warmup request
    def _wait_and_warmup():
        headers = {}
        url = server_args.url()
        if server_args.api_key:
            headers[API_KEY_HEADER_NAME] = server_args.api_key

        # Wait until the server is launched
        for _ in range(120):
            time.sleep(0.5)
            try:
                requests.get(url + "/get_model_info", timeout=5, headers=headers)
                break
            except requests.exceptions.RequestException:
                pass

        # Send a warmup request
        try:
            for _ in range(server_args.dp_size):
                res = requests.post(
                    url + "/generate",
                    json={
                        "text": "The capital city of France is",
                        "sampling_params": {
                            "temperature": 0,
                            "max_new_tokens": 16,
                        },
                    },
                    headers=headers,
                    timeout=600,
                )
                assert res.status_code == 200
        except Exception as e:
            if pipe_finish_writer is not None:
                pipe_finish_writer.send(get_exception_traceback())
            print(f"Initialization failed. warmup error: {e}")
            raise e

        if pipe_finish_writer is not None:
            pipe_finish_writer.send("init ok")

    t = threading.Thread(target=_wait_and_warmup)
    t.start()

    # Listen for requests
    try:
        uvicorn.run(
            app,
            host=server_args.host,
            port=server_args.port,
            log_level=server_args.log_level,
            timeout_keep_alive=5,
            loop="uvloop",
        )
    finally:
        t.join()


class Runtime:
    """
    A wrapper for the server.
    This is used for launching the server in a python program without
    using the commond line interface.
    """

    def __init__(
        self,
        log_level: str = "error",
        model_overide_args: Optional[dict] = None,
        *args,
        **kwargs,
    ):
        """See the arguments in server_args.py::ServerArgs"""
        self.server_args = ServerArgs(*args, log_level=log_level, **kwargs)

        # Pre-allocate ports
        self.server_args.port, self.server_args.additional_ports = allocate_init_ports(
            self.server_args.port,
            self.server_args.additional_ports,
            self.server_args.tp_size,
            self.server_args.dp_size,
        )

        self.url = self.server_args.url()
        self.generate_url = (
            f"http://{self.server_args.host}:{self.server_args.port}/generate"
        )

        self.pid = None
        pipe_reader, pipe_writer = mp.Pipe(duplex=False)
        proc = mp.Process(
            target=launch_server,
            args=(self.server_args, pipe_writer, model_overide_args),
        )
        proc.start()
        pipe_writer.close()
        self.pid = proc.pid

        try:
            init_state = pipe_reader.recv()
        except EOFError:
            init_state = ""

        if init_state != "init ok":
            self.shutdown()
            raise RuntimeError(
                "Initialization failed. Please see the error messages above."
            )

        self.endpoint = RuntimeEndpoint(self.url)

    def shutdown(self):
        if self.pid is not None:
            try:
                parent = psutil.Process(self.pid)
            except psutil.NoSuchProcess:
                return
            children = parent.children(recursive=True)
            for child in children:
                child.kill()
            psutil.wait_procs(children, timeout=5)
            parent.kill()
            parent.wait(timeout=5)
            self.pid = None

    def get_tokenizer(self):
        return get_tokenizer(
            self.server_args.tokenizer_path,
            tokenizer_mode=self.server_args.tokenizer_mode,
            trust_remote_code=self.server_args.trust_remote_code,
        )

    async def add_request(
        self,
        prompt: str,
        sampling_params: Dict,
    ):
        """Send a streaming generate request and yield incremental text."""
        json_data = {
            "text": prompt,
            "sampling_params": sampling_params,
            "stream": True,
        }
        pos = 0

        timeout = aiohttp.ClientTimeout(total=3 * 3600)
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.post(self.generate_url, json=json_data) as response:
                async for chunk, _ in response.content.iter_chunks():
                    chunk = chunk.decode("utf-8")
                    if chunk and chunk.startswith("data:"):
                        if chunk == "data: [DONE]\n\n":
                            break
                        data = json.loads(chunk[5:].strip("\n"))
                        cur = data["text"][pos:]
                        if cur:
                            yield cur
                        pos += len(cur)

    def __del__(self):
        self.shutdown()
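

# A streaming sketch for Runtime.add_request (illustrative; the model path is
# a placeholder):
#
#     runtime = Runtime(model_path="meta-llama/Llama-2-7b-chat-hf")
#
#     async def main():
#         async for chunk in runtime.add_request(
#             "The capital city of France is",
#             {"temperature": 0, "max_new_tokens": 16},
#         ):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(main())
#     runtime.shutdown()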