"""
Copyright 2023-2024 SGLang Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

"""Conversion between OpenAI APIs and native SRT APIs"""

import asyncio
import json
import os
import time
import uuid
from http import HTTPStatus
from typing import Dict, List, Optional

from fastapi import HTTPException, Request, UploadFile
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import ValidationError

from sglang.srt.conversation import (
    Conversation,
    SeparatorStyle,
    chat_template_exists,
    generate_chat_conv,
    register_conv_template,
)
from sglang.srt.managers.io_struct import EmbeddingReqInput, GenerateReqInput
from sglang.srt.openai_api.protocol import (
    BatchRequest,
    BatchResponse,
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse,
    ChatCompletionTokenLogprob,
    ChatMessage,
    ChoiceLogprobs,
    CompletionRequest,
    CompletionResponse,
    CompletionResponseChoice,
    CompletionResponseStreamChoice,
    CompletionStreamResponse,
    DeltaMessage,
    EmbeddingObject,
    EmbeddingRequest,
    EmbeddingResponse,
    ErrorResponse,
    FileDeleteResponse,
    FileRequest,
    FileResponse,
    LogProbs,
    TopLogprob,
    UsageInfo,
)

chat_template_name = None


class FileMetadata:
    def __init__(self, filename: str, purpose: str):
        self.filename = filename
        self.purpose = purpose


# In-memory storage for batch jobs and files
batch_storage: Dict[str, BatchResponse] = {}
file_id_request: Dict[str, FileMetadata] = {}
file_id_response: Dict[str, FileResponse] = {}
# map file id to file path in the SGLang backend
file_id_storage: Dict[str, str] = {}


# backend storage directory
storage_dir = None


def format_finish_reason(finish_reason) -> Optional[str]:
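    """Map an SRT finish_reason string to an OpenAI-style value ("stop", "length", "abort") or None."""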
    if finish_reason.startswith("None"):
        return None
    elif finish_reason.startswith("FINISH_MATCHED"):
        return "stop"
    elif finish_reason.startswith("FINISH_LENGTH"):
        return "length"
    elif finish_reason.startswith("FINISH_ABORT"):
        return "abort"
    else:
        return "unknown"


def create_error_response(
    message: str,
    err_type: str = "BadRequestError",
    status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
):
    error = ErrorResponse(message=message, type=err_type, code=status_code.value)
    return JSONResponse(content=error.model_dump(), status_code=error.code)


def create_streaming_error_response(
    message: str,
    err_type: str = "BadRequestError",
    status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
) -> str:
    error = ErrorResponse(message=message, type=err_type, code=status_code.value)
    json_str = json.dumps({"error": error.model_dump()})
    return json_str


def load_chat_template_for_openai_api(chat_template_arg):
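    """Set the chat template used by the OpenAI-compatible chat endpoint.

    Accepts either a built-in template name or a path to a JSON file that
    defines the template fields (name, sep_style, system, user, assistant,
    sep, stop_str).
    """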
    global chat_template_name

    print(f"Use chat template: {chat_template_arg}")
    if not chat_template_exists(chat_template_arg):
        if not os.path.exists(chat_template_arg):
            raise RuntimeError(
                f"Chat template {chat_template_arg} is not a built-in template name "
                "or a valid chat template file path."
            )
        with open(chat_template_arg, "r") as filep:
            template = json.load(filep)
            try:
                sep_style = SeparatorStyle[template["sep_style"]]
            except KeyError:
                raise ValueError(
                    f"Unknown separator style: {template['sep_style']}"
                ) from None
            register_conv_template(
                Conversation(
                    name=template["name"],
                    system_template=template["system"] + "\n{system_message}",
                    system_message=template.get("system_message", ""),
                    roles=(template["user"], template["assistant"]),
                    sep_style=sep_style,
                    sep=template.get("sep", "\n"),
                    stop_str=template["stop_str"],
                ),
                override=True,
            )
        chat_template_name = template["name"]
    else:
        chat_template_name = chat_template_arg


async def v1_files_create(file: UploadFile, purpose: str, file_storage_pth: str = None):
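    """Upload a file for the OpenAI-compatible files API: persist it to the
    storage directory and record its metadata in the in-memory maps."""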
    try:
        global storage_dir
        if file_storage_pth:
            storage_dir = file_storage_pth
        # Read the file content
        file_content = await file.read()

        # Create an instance of RequestBody
        request_body = FileRequest(file=file_content, purpose=purpose)

        # Save the file to the sglang_oai_storage directory
        os.makedirs(storage_dir, exist_ok=True)
        file_id = f"backend_input_file-{uuid.uuid4()}"
        filename = f"{file_id}.jsonl"
        file_path = os.path.join(storage_dir, filename)

        with open(file_path, "wb") as f:
            f.write(request_body.file)

        # add info to global file map
        file_id_request[file_id] = FileMetadata(filename=file.filename, purpose=purpose)
        file_id_storage[file_id] = file_path

        # Return the response in the required format
        response = FileResponse(
            id=file_id,
            bytes=len(request_body.file),
            created_at=int(time.time()),
            filename=file.filename,
            purpose=request_body.purpose,
        )
        file_id_response[file_id] = response

        return response
    except ValidationError as e:
        return {"error": "Invalid input", "details": e.errors()}


async def v1_delete_file(file_id: str):
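    """Delete a stored file from disk and drop its in-memory records."""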
    # Retrieve the file job from the in-memory storage
    file_response = file_id_response.get(file_id)
    if file_response is None:
        raise HTTPException(status_code=404, detail="File not found")
    file_path = file_id_storage.get(file_id)
    if file_path is None:
        raise HTTPException(status_code=404, detail="File not found")
    os.remove(file_path)
    del file_id_response[file_id]
    del file_id_storage[file_id]
    return FileDeleteResponse(id=file_id, deleted=True)


async def v1_batches(tokenizer_manager, raw_request: Request):
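    """Create a batch job from the request body and start processing it asynchronously."""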
    try:
        body = await raw_request.json()

        batch_request = BatchRequest(**body)

        batch_id = f"batch_{uuid.uuid4()}"

        # Create an instance of BatchResponse
        batch_response = BatchResponse(
            id=batch_id,
            endpoint=batch_request.endpoint,
            input_file_id=batch_request.input_file_id,
            completion_window=batch_request.completion_window,
            created_at=int(time.time()),
            metadata=batch_request.metadata,
        )

        batch_storage[batch_id] = batch_response

        # Start processing the batch asynchronously
        asyncio.create_task(process_batch(tokenizer_manager, batch_id, batch_request))

        # Return the initial batch_response
        return batch_response

    except ValidationError as e:
        return {"error": "Invalid input", "details": e.errors()}
    except Exception as e:
        return {"error": str(e)}


async def process_batch(tokenizer_manager, batch_id: str, batch_request: BatchRequest):
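    """Run a batch job: read the input JSONL file, generate responses for each
    request, and write the results to an output file."""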
    try:
        # Update the batch status to "in_progress"
        batch_storage[batch_id].status = "in_progress"
        batch_storage[batch_id].in_progress_at = int(time.time())

        # Retrieve the input file content
        input_file_request = file_id_request.get(batch_request.input_file_id)
        if not input_file_request:
            raise ValueError("Input file not found")

        # Parse the JSONL file and process each request
        input_file_path = file_id_storage.get(batch_request.input_file_id)
        with open(input_file_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        total_requests = len(lines)
        completed_requests = 0
        failed_requests = 0

        all_ret = []
        end_point = batch_storage[batch_id].endpoint
        file_request_list = []
        all_requests = []
        for line in lines:
            request_data = json.loads(line)
            file_request_list.append(request_data)
            body = request_data["body"]
            if end_point == "/v1/chat/completions":
                all_requests.append(ChatCompletionRequest(**body))
            elif end_point == "/v1/completions":
                all_requests.append(CompletionRequest(**body))
        if end_point == "/v1/chat/completions":
            adapted_request, request = v1_chat_generate_request(
                all_requests, tokenizer_manager
            )
        elif end_point == "/v1/completions":
            adapted_request, request = v1_generate_request(all_requests)
        # Guard against the inner request failing before any responses are produced.
        responses = []
        try:
            ret = await tokenizer_manager.generate_request(adapted_request).__anext__()
            if not isinstance(ret, list):
                ret = [ret]
            if end_point == "/v1/chat/completions":
                responses = v1_chat_generate_response(request, ret, to_file=True)
            else:
                responses = v1_generate_response(
                    request, ret, tokenizer_manager, to_file=True
                )

        except Exception as e:
            error_json = {
                "id": f"batch_req_{uuid.uuid4()}",
                "custom_id": request_data.get("custom_id"),
                "response": None,
                "error": {"message": str(e)},
            }
            all_ret.append(error_json)
            failed_requests += len(file_request_list)

        for idx, response in enumerate(responses):
            # NOTE: batch_req ids are generated per response here; they could
            # instead be assigned at batch granularity.
            response_json = {
                "id": f"batch_req_{uuid.uuid4()}",
                "custom_id": file_request_list[idx].get("custom_id"),
                "response": response,
                "error": None,
            }
            all_ret.append(response_json)
            completed_requests += 1
        # Write results to a new file
        output_file_id = f"backend_result_file-{uuid.uuid4()}"
        global storage_dir
        output_file_path = os.path.join(storage_dir, f"{output_file_id}.jsonl")
        with open(output_file_path, "w", encoding="utf-8") as f:
            for ret in all_ret:
                f.write(json.dumps(ret) + "\n")

        # Update batch response with output file information
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.output_file_id = output_file_id
        file_id_storage[output_file_id] = output_file_path
        file_id_response[output_file_id] = FileResponse(
            id=output_file_id,
            bytes=os.path.getsize(output_file_path),
            created_at=int(time.time()),
            filename=f"{output_file_id}.jsonl",
            purpose="batch_result",
        )
        # Update batch status to "completed"
        retrieve_batch.status = "completed"
        retrieve_batch.completed_at = int(time.time())
        retrieve_batch.request_counts = {
            "total": total_requests,
            "completed": completed_requests,
            "failed": failed_requests,
        }

    except Exception as e:
        print("error in SGLang:", e)
        # Update batch status to "failed"
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.status = "failed"
        retrieve_batch.failed_at = int(time.time())
        retrieve_batch.errors = {"message": str(e)}


async def v1_retrieve_batch(batch_id: str):
    # Retrieve the batch job from the in-memory storage
    batch_response = batch_storage.get(batch_id)
    if batch_response is None:
        raise HTTPException(status_code=404, detail="Batch not found")

    return batch_response


async def v1_retrieve_file(file_id: str):
    # Retrieve the file metadata from the in-memory storage
    file_response = file_id_response.get(file_id)
    if file_response is None:
        raise HTTPException(status_code=404, detail="File not found")
    return file_response


async def v1_retrieve_file_content(file_id: str):
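    """Stream the raw content of a stored file back to the client."""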
    file_pth = file_id_storage.get(file_id)
    if not file_pth or not os.path.exists(file_pth):
        raise HTTPException(status_code=404, detail="File not found")

    def iter_file():
        with open(file_pth, mode="rb") as file_like:
            yield from file_like

    return StreamingResponse(iter_file(), media_type="application/octet-stream")


def v1_generate_request(all_requests):
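    """Convert one or more CompletionRequest objects into a single GenerateReqInput."""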
    prompts = []
    sampling_params_list = []
    return_logprobs = []
    top_logprobs_nums = []
    first_prompt_type = type(all_requests[0].prompt)

    for request in all_requests:
        prompt = request.prompt
        assert (
            type(prompt) == first_prompt_type
        ), "All prompts must be of the same type in file input settings"
        prompts.append(prompt)
        return_logprobs.append(request.logprobs is not None and request.logprobs > 0)
        top_logprobs_nums.append(
            request.logprobs if request.logprobs is not None else 0
        )
        sampling_params_list.append(
            {
                "temperature": request.temperature,
                "max_new_tokens": request.max_tokens,
                "min_new_tokens": request.min_tokens,
                "stop": request.stop,
                "stop_token_ids": request.stop_token_ids,
                "top_p": request.top_p,
                "presence_penalty": request.presence_penalty,
                "frequency_penalty": request.frequency_penalty,
                "repetition_penalty": request.repetition_penalty,
                "regex": request.regex,
                "n": request.n,
                "ignore_eos": request.ignore_eos,
            }
        )
        if len(all_requests) > 1 and request.n > 1:
            raise ValueError(
                "Parallel sampling is not supported for completions from files"
            )

    if len(all_requests) == 1:
        prompt = prompts[0]
        sampling_params_list = sampling_params_list[0]
        return_logprobs = return_logprobs[0]
        top_logprobs_nums = top_logprobs_nums[0]
        if isinstance(prompt, str) or isinstance(prompt[0], str):
            prompt_kwargs = {"text": prompt}
        else:
            prompt_kwargs = {"input_ids": prompt}
    else:
        if isinstance(prompts[0], str):
            prompt_kwargs = {"text": prompts}
        else:
            prompt_kwargs = {"input_ids": prompts}

    adapted_request = GenerateReqInput(
        **prompt_kwargs,
        sampling_params=sampling_params_list,
        return_logprob=return_logprobs,
        top_logprobs_num=top_logprobs_nums,
        return_text_in_logprobs=True,
        stream=all_requests[0].stream,
    )

    if len(all_requests) == 1:
        return adapted_request, all_requests[0]
    return adapted_request, all_requests


def v1_generate_response(request, ret, tokenizer_manager, to_file=False):
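    """Build a CompletionResponse from SRT outputs, or plain dicts when to_file=True (batch mode)."""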
    choices = []
    echo = False

    if (not isinstance(request, list)) and request.echo:
        # TODO: handle the case where the prompt is token ids
        if isinstance(request.prompt, list) and isinstance(request.prompt[0], str):
            # for the case of multiple str prompts
            prompts = request.prompt
        elif isinstance(request.prompt, list) and isinstance(request.prompt[0], list):
            # for the case of multiple token ids prompts
            prompts = [
                tokenizer_manager.tokenizer.decode(prompt, skip_special_tokens=True)
                for prompt in request.prompt
            ]
        elif isinstance(request.prompt, list) and isinstance(request.prompt[0], int):
            # for the case of single token ids prompt
            prompts = [
                tokenizer_manager.tokenizer.decode(
                    request.prompt, skip_special_tokens=True
                )
            ]
        else:
            # for the case of single str prompt
            prompts = [request.prompt]
        echo = True

    for idx, ret_item in enumerate(ret):
        text = ret_item["text"]
        if isinstance(request, list) and request[idx].echo:
            echo = True
            text = request[idx].prompt + text
        if (not isinstance(request, list)) and echo:
            prompt_index = idx // request.n
            text = prompts[prompt_index] + text

        logprobs = False
        if isinstance(request, list) and request[idx].logprobs:
            logprobs = True
        elif (not isinstance(request, list)) and request.logprobs:
            logprobs = True
        if logprobs:
            if echo:
                input_token_logprobs = ret_item["meta_info"]["input_token_logprobs"]
                input_top_logprobs = ret_item["meta_info"]["input_top_logprobs"]
            else:
                input_token_logprobs = None
                input_top_logprobs = None

            logprobs = to_openai_style_logprobs(
                input_token_logprobs=input_token_logprobs,
                input_top_logprobs=input_top_logprobs,
                output_token_logprobs=ret_item["meta_info"]["output_token_logprobs"],
                output_top_logprobs=ret_item["meta_info"]["output_top_logprobs"],
            )
        else:
            logprobs = None

        if to_file:
            # make the choice data JSON serializable
            choice_data = {
                "index": 0,
                "text": text,
                "logprobs": logprobs,
                "finish_reason": format_finish_reason(
                    ret_item["meta_info"]["finish_reason"]
                ),
            }
        else:
            choice_data = CompletionResponseChoice(
                index=idx,
                text=text,
                logprobs=logprobs,
                finish_reason=format_finish_reason(
                    ret_item["meta_info"]["finish_reason"]
                ),
            )

        choices.append(choice_data)

    if to_file:
        responses = []
        for i, choice in enumerate(choices):
            response = {
                "status_code": 200,
                "request_id": ret[i]["meta_info"]["id"],
                "body": {
                    # keep the body in the standard completion response format; adjust if needed
                    "id": ret[i]["meta_info"]["id"],
                    "object": "text_completion",
                    "created": int(time.time()),
                    "model": request[i].model,
                    "choices": choice,
                    "usage": {
                        "prompt_tokens": ret[i]["meta_info"]["prompt_tokens"],
                        "completion_tokens": ret[i]["meta_info"]["completion_tokens"],
                        "total_tokens": ret[i]["meta_info"]["prompt_tokens"]
                        + ret[i]["meta_info"]["completion_tokens"],
                    },
                    "system_fingerprint": None,
                },
            }
            responses.append(response)
        return responses
    else:
        prompt_tokens = sum(
            ret[i]["meta_info"]["prompt_tokens"] for i in range(0, len(ret), request.n)
        )
        completion_tokens = sum(item["meta_info"]["completion_tokens"] for item in ret)
        response = CompletionResponse(
            id=ret[0]["meta_info"]["id"],
            model=request.model,
            choices=choices,
            usage=UsageInfo(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            ),
        )
    return response


async def v1_completions(tokenizer_manager, raw_request: Request):
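    """Serve an OpenAI-compatible completions request, in streaming or non-streaming mode."""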
    request_json = await raw_request.json()
    all_requests = [CompletionRequest(**request_json)]
    adapted_request, request = v1_generate_request(all_requests)

    if adapted_request.stream:

        async def generate_stream_resp():
            stream_buffer = ""
            n_prev_token = 0
            try:
                async for content in tokenizer_manager.generate_request(
                    adapted_request, raw_request
                ):
                    text = content["text"]
                    prompt_tokens = content["meta_info"]["prompt_tokens"]
                    completion_tokens = content["meta_info"]["completion_tokens"]

                    if not stream_buffer:  # The first chunk
                        if request.echo:
                            if isinstance(request.prompt, str):
                                # for the case of single str prompts
                                prompts = request.prompt
                            elif isinstance(request.prompt, list) and isinstance(
                                request.prompt[0], int
                            ):
                                prompts = tokenizer_manager.tokenizer.decode(
                                    request.prompt, skip_special_tokens=True
                                )

                            # Prepend prompt in response text.
                            text = prompts + text

                    if request.logprobs:
                        # The first chunk and echo is enabled.
                        if not stream_buffer and request.echo:
                            input_token_logprobs = content["meta_info"][
                                "input_token_logprobs"
                            ]
                            input_top_logprobs = content["meta_info"][
                                "input_top_logprobs"
                            ]
                        else:
                            input_token_logprobs = None
                            input_top_logprobs = None

                        logprobs = to_openai_style_logprobs(
                            input_token_logprobs=input_token_logprobs,
                            input_top_logprobs=input_top_logprobs,
                            output_token_logprobs=content["meta_info"][
                                "output_token_logprobs"
                            ][n_prev_token:],
                            output_top_logprobs=content["meta_info"][
                                "output_top_logprobs"
                            ][n_prev_token:],
                        )
                        n_prev_token = len(
                            content["meta_info"]["output_token_logprobs"]
                        )
                    else:
                        logprobs = None

                    delta = text[len(stream_buffer) :]
                    stream_buffer = stream_buffer + delta
                    choice_data = CompletionResponseStreamChoice(
                        index=0,
                        text=delta,
                        logprobs=logprobs,
                        finish_reason=format_finish_reason(
                            content["meta_info"]["finish_reason"]
                        ),
                    )
                    chunk = CompletionStreamResponse(
                        id=content["meta_info"]["id"],
                        object="text_completion",
                        choices=[choice_data],
                        model=request.model,
                    )
                    yield f"data: {chunk.model_dump_json()}\n\n"
                if request.stream_options and request.stream_options.include_usage:
                    usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )

                    final_usage_chunk = CompletionStreamResponse(
                        id=str(uuid.uuid4().hex),
                        choices=[],
                        model=request.model,
                        usage=usage,
                    )
                    final_usage_data = final_usage_chunk.model_dump_json(
                        exclude_unset=True, exclude_none=True
                    )
                    yield f"data: {final_usage_data}\n\n"
            except ValueError as e:
                error = create_streaming_error_response(str(e))
                yield f"data: {error}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream_resp(),
            media_type="text/event-stream",
            background=tokenizer_manager.create_abort_task(adapted_request),
        )

    # Non-streaming response.
    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))

    if not isinstance(ret, list):
        ret = [ret]

    response = v1_generate_response(request, ret, tokenizer_manager)
    return response


def v1_chat_generate_request(all_requests, tokenizer_manager):
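    """Convert one or more ChatCompletionRequest objects into a single
    GenerateReqInput, applying the configured chat template."""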
    input_ids = []
    sampling_params_list = []
    image_data_list = []
    return_logprobs = []
    top_logprobs_nums = []
    for request in all_requests:
        # Prep the data needed for the underlying GenerateReqInput:
        #  - prompt: The full prompt string.
        #  - stop: Custom stop tokens.
        #  - image_data: None or a list of image strings (URLs or base64 strings).
        #    None skips any image processing in GenerateReqInput.
        if not isinstance(request.messages, str):
            # Apply chat template and its stop strings.
            if chat_template_name is None:
                prompt_ids = tokenizer_manager.tokenizer.apply_chat_template(
                    request.messages, tokenize=True, add_generation_prompt=True
                )
                stop = request.stop
                image_data = None
            else:
                conv = generate_chat_conv(request, chat_template_name)
                prompt = conv.get_prompt()
                image_data = conv.image_data
                stop = conv.stop_str or []
                if request.stop:
                    if isinstance(request.stop, str):
                        stop.append(request.stop)
                    else:
                        stop.extend(request.stop)
                prompt_ids = tokenizer_manager.tokenizer.encode(prompt)
        else:
            # Use the raw prompt and stop strings if the messages field is already a string.
            prompt_ids = request.messages
            stop = request.stop
            image_data = None
        input_ids.append(prompt_ids)
        return_logprobs.append(request.logprobs)
        top_logprobs_nums.append(request.top_logprobs)
        sampling_params_list.append(
            {
                "temperature": request.temperature,
                "max_new_tokens": request.max_tokens,
                "min_new_tokens": request.min_tokens,
                "stop": stop,
                "stop_token_ids": request.stop_token_ids,
                "top_p": request.top_p,
                "presence_penalty": request.presence_penalty,
                "frequency_penalty": request.frequency_penalty,
                "repetition_penalty": request.repetition_penalty,
                "regex": request.regex,
                "n": request.n,
            }
        )
        image_data_list.append(image_data)
    if len(all_requests) == 1:
        input_ids = input_ids[0]
        if isinstance(input_ids, str):
            prompt_kwargs = {"text": input_ids}
        else:
            prompt_kwargs = {"input_ids": input_ids}
        sampling_params_list = sampling_params_list[0]
        image_data = image_data_list[0]
        return_logprobs = return_logprobs[0]
        top_logprobs_nums = top_logprobs_nums[0]
    else:
        if isinstance(input_ids[0], str):
            prompt_kwargs = {"text": input_ids}
        else:
            prompt_kwargs = {"input_ids": input_ids}
    adapted_request = GenerateReqInput(
        **prompt_kwargs,
        image_data=image_data,
        sampling_params=sampling_params_list,
        return_logprob=return_logprobs,
        top_logprobs_num=top_logprobs_nums,
        stream=all_requests[0].stream,
        return_text_in_logprobs=True,
    )
    if len(all_requests) == 1:
        return adapted_request, all_requests[0]
    return adapted_request, all_requests


def v1_chat_generate_response(request, ret, to_file=False):
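    """Build a ChatCompletionResponse from SRT outputs, or plain dicts when to_file=True (batch mode)."""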
    choices = []

    for idx, ret_item in enumerate(ret):
        logprobs = False
        if isinstance(request, list) and request[idx].logprobs:
            logprobs = True
        elif (not isinstance(request, list)) and request.logprobs:
            logprobs = True
        if logprobs:
            logprobs = to_openai_style_logprobs(
                output_token_logprobs=ret_item["meta_info"]["output_token_logprobs"],
                output_top_logprobs=ret_item["meta_info"]["output_top_logprobs"],
            )
            token_logprobs = []
            for token, logprob in zip(logprobs.tokens, logprobs.token_logprobs):
                token_bytes = list(token.encode("utf-8"))
                top_logprobs = []
                if logprobs.top_logprobs:
                    for top_token, top_logprob in logprobs.top_logprobs[0].items():
                        top_token_bytes = list(top_token.encode("utf-8"))
                        top_logprobs.append(
                            TopLogprob(
                                token=top_token,
                                bytes=top_token_bytes,
                                logprob=top_logprob,
                            )
                        )
                token_logprobs.append(
                    ChatCompletionTokenLogprob(
                        token=token,
                        bytes=token_bytes,
                        logprob=logprob,
                        top_logprobs=top_logprobs,
                    )
                )

            choice_logprobs = ChoiceLogprobs(content=token_logprobs)
        else:
            choice_logprobs = None

        if to_file:
            # make the choice data JSON serializable
            choice_data = {
                "index": 0,
                "message": {"role": "assistant", "content": ret_item["text"]},
                "logprobs": choice_logprobs,
                "finish_reason": format_finish_reason(
                    ret_item["meta_info"]["finish_reason"]
                ),
            }
        else:
            choice_data = ChatCompletionResponseChoice(
                index=idx,
                message=ChatMessage(role="assistant", content=ret_item["text"]),
                logprobs=choice_logprobs,
                finish_reason=format_finish_reason(
                    ret_item["meta_info"]["finish_reason"]
                ),
            )

        choices.append(choice_data)

    if to_file:
        responses = []

        for i, choice in enumerate(choices):
            response = {
                "status_code": 200,
                "request_id": ret[i]["meta_info"]["id"],
                "body": {
                    # keep the body in the standard chat completion response format; adjust if needed
                    "id": ret[i]["meta_info"]["id"],
                    "object": "chat.completion",
                    "created": int(time.time()),
                    "model": request[i].model,
                    "choices": choice,
                    "usage": {
                        "prompt_tokens": ret[i]["meta_info"]["prompt_tokens"],
                        "completion_tokens": ret[i]["meta_info"]["completion_tokens"],
                        "total_tokens": ret[i]["meta_info"]["prompt_tokens"]
                        + ret[i]["meta_info"]["completion_tokens"],
                    },
                    "system_fingerprint": None,
                },
            }
            responses.append(response)
        return responses
    else:
        prompt_tokens = sum(
            ret[i]["meta_info"]["prompt_tokens"] for i in range(0, len(ret), request.n)
        )
        completion_tokens = sum(item["meta_info"]["completion_tokens"] for item in ret)
        response = ChatCompletionResponse(
            id=ret[0]["meta_info"]["id"],
            model=request.model,
            choices=choices,
            usage=UsageInfo(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            ),
        )
        return response


async def v1_chat_completions(tokenizer_manager, raw_request: Request):
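    """Serve an OpenAI-compatible chat completions request, in streaming or non-streaming mode."""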
    request_json = await raw_request.json()
    all_requests = [ChatCompletionRequest(**request_json)]
    adapted_request, request = v1_chat_generate_request(all_requests, tokenizer_manager)

    if adapted_request.stream:

        async def generate_stream_resp():
            is_first = True

            stream_buffer = ""
            n_prev_token = 0
            try:
                async for content in tokenizer_manager.generate_request(
                    adapted_request, raw_request
                ):
                    prompt_tokens = content["meta_info"]["prompt_tokens"]
                    completion_tokens = content["meta_info"]["completion_tokens"]
                    if request.logprobs:
                        logprobs = to_openai_style_logprobs(
                            output_token_logprobs=content["meta_info"][
                                "output_token_logprobs"
                            ][n_prev_token:],
                            output_top_logprobs=content["meta_info"][
                                "output_top_logprobs"
                            ][n_prev_token:],
                        )

                        n_prev_token = len(
                            content["meta_info"]["output_token_logprobs"]
                        )
                        token_logprobs = []
                        for token, logprob in zip(
                            logprobs.tokens, logprobs.token_logprobs
                        ):
                            token_bytes = list(token.encode("utf-8"))
                            top_logprobs = []
                            if logprobs.top_logprobs:
                                for top_token, top_logprob in logprobs.top_logprobs[
                                    0
                                ].items():
                                    top_token_bytes = list(top_token.encode("utf-8"))
                                    top_logprobs.append(
                                        TopLogprob(
                                            token=top_token,
                                            bytes=top_token_bytes,
                                            logprob=top_logprob,
                                        )
                                    )
                            token_logprobs.append(
                                ChatCompletionTokenLogprob(
                                    token=token,
                                    bytes=token_bytes,
                                    logprob=logprob,
                                    top_logprobs=top_logprobs,
                                )
                            )

                        choice_logprobs = ChoiceLogprobs(content=token_logprobs)

                    else:
                        choice_logprobs = None

                    if is_first:
                        # First chunk with role
                        is_first = False
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=0,
                            delta=DeltaMessage(role="assistant"),
                            finish_reason=format_finish_reason(
                                content["meta_info"]["finish_reason"]
                            ),
                            logprobs=choice_logprobs,
                        )
                        chunk = ChatCompletionStreamResponse(
                            id=content["meta_info"]["id"],
                            choices=[choice_data],
                            model=request.model,
                        )
                        yield f"data: {chunk.model_dump_json()}\n\n"

                    text = content["text"]
                    delta = text[len(stream_buffer) :]
                    stream_buffer = stream_buffer + delta
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=0,
                        delta=DeltaMessage(content=delta),
                        finish_reason=format_finish_reason(
                            content["meta_info"]["finish_reason"]
                        ),
                        logprobs=choice_logprobs,
                    )
                    chunk = ChatCompletionStreamResponse(
                        id=content["meta_info"]["id"],
                        choices=[choice_data],
                        model=request.model,
                    )
                    yield f"data: {chunk.model_dump_json()}\n\n"
                if request.stream_options and request.stream_options.include_usage:
                    usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )

                    final_usage_chunk = ChatCompletionStreamResponse(
                        id=str(uuid.uuid4().hex),
                        choices=[],
                        model=request.model,
                        usage=usage,
                    )
                    final_usage_data = final_usage_chunk.model_dump_json(
                        exclude_unset=True, exclude_none=True
                    )
                    yield f"data: {final_usage_data}\n\n"
            except ValueError as e:
                error = create_streaming_error_response(str(e))
                yield f"data: {error}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream_resp(),
            media_type="text/event-stream",
            background=tokenizer_manager.create_abort_task(adapted_request),
        )

    # Non-streaming response.
    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))
    if not isinstance(ret, list):
        ret = [ret]

    response = v1_chat_generate_response(request, ret)

    return response


def v1_embedding_request(all_requests, tokenizer_manager):
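    """Convert one or more EmbeddingRequest objects into a single EmbeddingReqInput."""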
    prompts = []
    sampling_params_list = []
    first_prompt_type = type(all_requests[0].input)

    for request in all_requests:
        prompt = request.input
        assert (
            type(prompt) == first_prompt_type
        ), "All prompts must be of the same type in file input settings"
        prompts.append(prompt)

    if len(all_requests) == 1:
        prompt = prompts[0]
        if isinstance(prompt, str) or isinstance(prompt[0], str):
            prompt_kwargs = {"text": prompt}
        else:
            prompt_kwargs = {"input_ids": prompt}
    else:
        if isinstance(prompts[0], str) or isinstance(prompts[0][0], str):
            prompt_kwargs = {"text": prompts}
        else:
            prompt_kwargs = {"input_ids": prompts}

    adapted_request = EmbeddingReqInput(
        **prompt_kwargs,
    )

    if len(all_requests) == 1:
        return adapted_request, all_requests[0]
    return adapted_request, all_requests


def v1_embedding_response(ret, model_path, to_file=False):
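    """Build an EmbeddingResponse from SRT embedding outputs, accumulating prompt token usage."""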
    embedding_objects = []
    prompt_tokens = 0
    for idx, ret_item in enumerate(ret):
        embedding_objects.append(
            EmbeddingObject(
                embedding=ret[idx]["embedding"],
                index=idx,
            )
        )
        prompt_tokens += ret[idx]["meta_info"]["prompt_tokens"]

    return EmbeddingResponse(
        data=embedding_objects,
        model=model_path,
        usage=UsageInfo(
            prompt_tokens=prompt_tokens,
            total_tokens=prompt_tokens,
        ),
    )


async def v1_embeddings(tokenizer_manager, raw_request: Request):
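    """Serve an OpenAI-compatible embeddings request."""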
    request_json = await raw_request.json()
    all_requests = [EmbeddingRequest(**request_json)]
    adapted_request, request = v1_embedding_request(all_requests, tokenizer_manager)

    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))

    if not isinstance(ret, list):
        ret = [ret]

    response = v1_embedding_response(ret, tokenizer_manager.model_path)

    return response


def to_openai_style_logprobs(
    input_token_logprobs=None,
    output_token_logprobs=None,
    input_top_logprobs=None,
    output_top_logprobs=None,
):
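    """Convert SRT token/top logprob lists into an OpenAI-style LogProbs object."""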
    ret_logprobs = LogProbs()

    def append_token_logprobs(token_logprobs):
        for logprob, _, token_text in token_logprobs:
            ret_logprobs.tokens.append(token_text)
            ret_logprobs.token_logprobs.append(logprob)

            # Not supported yet
            ret_logprobs.text_offset.append(-1)

    def append_top_logprobs(top_logprobs):
        for tokens in top_logprobs:
            if tokens is not None:
                ret_logprobs.top_logprobs.append(
                    {token[2]: token[0] for token in tokens}
                )
            else:
                ret_logprobs.top_logprobs.append(None)

    if input_token_logprobs is not None:
        append_token_logprobs(input_token_logprobs)
    if output_token_logprobs is not None:
        append_token_logprobs(output_token_logprobs)
    if input_top_logprobs is not None:
        append_top_logprobs(input_top_logprobs)
    if output_top_logprobs is not None:
        append_top_logprobs(output_top_logprobs)

    return ret_logprobs