# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion between OpenAI APIs and native SRT APIs"""

import asyncio
import base64
import json
import logging
import os
import time
import uuid
from http import HTTPStatus
from typing import Dict, List

from fastapi import HTTPException, Request, UploadFile
from fastapi.responses import ORJSONResponse, StreamingResponse
from pydantic import ValidationError

from sglang.srt.code_completion_parser import (
    generate_completion_prompt_from_request,
    is_completion_template_defined,
)
from sglang.srt.conversation import (
    Conversation,
    SeparatorStyle,
    chat_template_exists,
    generate_chat_conv,
    generate_embedding_convs,
    get_conv_template_by_model_path,
    register_conv_template,
)
from sglang.srt.function_call_parser import FunctionCallParser
from sglang.srt.managers.io_struct import EmbeddingReqInput, GenerateReqInput
from sglang.srt.openai_api.protocol import (
    BatchRequest,
    BatchResponse,
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse,
    ChatCompletionTokenLogprob,
    ChatMessage,
    ChoiceLogprobs,
    CompletionRequest,
    CompletionResponse,
    CompletionResponseChoice,
    CompletionResponseStreamChoice,
    CompletionStreamResponse,
    DeltaMessage,
    EmbeddingObject,
    EmbeddingRequest,
    EmbeddingResponse,
    ErrorResponse,
    FileDeleteResponse,
    FileRequest,
    FileResponse,
    FunctionResponse,
    LogProbs,
    MultimodalEmbeddingInput,
    ToolCall,
    TopLogprob,
    UsageInfo,
)
from sglang.srt.reasoning_parser import ReasoningParser
from sglang.utils import convert_json_schema_to_str, get_exception_traceback

logger = logging.getLogger(__name__)

chat_template_name = None


class FileMetadata:
    def __init__(self, filename: str, purpose: str):
        self.filename = filename
        self.purpose = purpose


# In-memory storage for batch jobs and files
batch_storage: Dict[str, BatchResponse] = {}
file_id_request: Dict[str, FileMetadata] = {}
file_id_response: Dict[str, FileResponse] = {}
# map file id to file path in SGLang backend
file_id_storage: Dict[str, str] = {}

# backend storage directory
storage_dir = None


def create_error_response(
    message: str,
    err_type: str = "BadRequestError",
    status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
):
    error = ErrorResponse(message=message, type=err_type, code=status_code.value)
    return ORJSONResponse(content=error.model_dump(), status_code=error.code)


def create_streaming_error_response(
    message: str,
    err_type: str = "BadRequestError",
    status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
) -> str:
    error = ErrorResponse(message=message, type=err_type, code=status_code.value)
    json_str = json.dumps({"error": error.model_dump()})
    return json_str


def load_chat_template_for_openai_api(tokenizer_manager, chat_template_arg, model_path):
    global chat_template_name

    logger.info(
        f"Use chat template for the OpenAI-compatible API server: {chat_template_arg}"
    )

    if not chat_template_exists(chat_template_arg):
        if not os.path.exists(chat_template_arg):
            raise RuntimeError(
                f"Chat template {chat_template_arg} is not a built-in template name "
                "or a valid chat template file path."
            )
        if chat_template_arg.endswith(".jinja"):
            with open(chat_template_arg, "r") as f:
                chat_template = "".join(f.readlines()).strip("\n")
            tokenizer_manager.tokenizer.chat_template = chat_template.replace(
                "\\n", "\n"
            )
            chat_template_name = None
        else:
            assert chat_template_arg.endswith(
                ".json"
            ), "unrecognized format of chat template file"
            with open(chat_template_arg, "r") as filep:
                template = json.load(filep)
                try:
                    sep_style = SeparatorStyle[template["sep_style"]]
                except KeyError:
                    raise ValueError(
                        f"Unknown separator style: {template['sep_style']}"
                    ) from None
                register_conv_template(
                    Conversation(
                        name=template["name"],
                        system_template=template["system"] + "\n{system_message}",
                        system_message=template.get("system_message", ""),
                        roles=(template["user"], template["assistant"]),
                        sep_style=sep_style,
                        sep=template.get("sep", "\n"),
                        stop_str=template["stop_str"],
                    ),
                    override=True,
                )
            chat_template_name = template["name"]
    else:
        chat_template_name = chat_template_arg


def guess_chat_template_name_from_model_path(model_path):
    global chat_template_name
    chat_template_name = get_conv_template_by_model_path(model_path)
    if chat_template_name is not None:
        logger.info(
            f"Inferred the chat template name from the model path: {chat_template_name}."
        )


def _validate_prompt(prompt: str):
    """Validate that the prompt is not empty or whitespace only."""
    is_invalid = False

    # Check for empty/whitespace string
    if isinstance(prompt, str):
        is_invalid = not prompt.strip()
    # Check for various invalid list cases: [], [""], [" "], [[]]
    elif isinstance(prompt, list):
        is_invalid = not prompt or (
            len(prompt) == 1
            and (
                (isinstance(prompt[0], str) and not prompt[0].strip())
                or (isinstance(prompt[0], list) and not prompt[0])
            )
        )

    if is_invalid:
        raise HTTPException(
            status_code=400,
            detail="Input cannot be empty or contain only whitespace.",
        )

    return prompt


async def v1_files_create(
    file: UploadFile, purpose: str, file_storage_path: str = None
):
    try:
        global storage_dir
        if file_storage_path:
            storage_dir = file_storage_path

        # Read the file content
        file_content = await file.read()

        # Create an instance of RequestBody
        request_body = FileRequest(file=file_content, purpose=purpose)

        # Save the file to the sglang_oai_storage directory
        os.makedirs(storage_dir, exist_ok=True)
        file_id = f"backend_input_file-{uuid.uuid4()}"
        filename = f"{file_id}.jsonl"
        file_path = os.path.join(storage_dir, filename)

        with open(file_path, "wb") as f:
            f.write(request_body.file)

        # add info to global file map
        file_id_request[file_id] = FileMetadata(filename=file.filename, purpose=purpose)
        file_id_storage[file_id] = file_path

        # Return the response in the required format
        response = FileResponse(
            id=file_id,
            bytes=len(request_body.file),
            created_at=int(time.time()),
            filename=file.filename,
            purpose=request_body.purpose,
        )
        file_id_response[file_id] = response

        return response
    except ValidationError as e:
        return {"error": "Invalid input", "details": e.errors()}


async def v1_delete_file(file_id: str):
    # Retrieve the file job from the in-memory storage
    file_response = file_id_response.get(file_id)
    if file_response is None:
        raise HTTPException(status_code=404, detail="File not found")
    file_path = file_id_storage.get(file_id)
    if file_path is None:
        raise HTTPException(status_code=404, detail="File not found")
    os.remove(file_path)
    del file_id_response[file_id]
    del file_id_storage[file_id]
    return FileDeleteResponse(id=file_id, deleted=True)


async def v1_batches(tokenizer_manager, raw_request: Request):
    try:
        body = await raw_request.json()

        batch_request = BatchRequest(**body)

        batch_id = f"batch_{uuid.uuid4()}"

        # Create an instance of BatchResponse
        batch_response = BatchResponse(
            id=batch_id,
            endpoint=batch_request.endpoint,
            input_file_id=batch_request.input_file_id,
            completion_window=batch_request.completion_window,
            created_at=int(time.time()),
            metadata=batch_request.metadata,
        )

        batch_storage[batch_id] = batch_response

        # Start processing the batch asynchronously
        asyncio.create_task(process_batch(tokenizer_manager, batch_id, batch_request))

        # Return the initial batch_response
        return batch_response

    except ValidationError as e:
        return {"error": "Invalid input", "details": e.errors()}
    except Exception as e:
        return {"error": str(e)}


async def process_batch(tokenizer_manager, batch_id: str, batch_request: BatchRequest):
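    """Run every request in the batch's input JSONL file through the engine and
    write the per-request results (or errors) to a new output file that is
    registered in the in-memory file storage."""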
    try:
        # Update the batch status to "in_progress"
        batch_storage[batch_id].status = "in_progress"
        batch_storage[batch_id].in_progress_at = int(time.time())

        # Retrieve the input file content
        input_file_request = file_id_request.get(batch_request.input_file_id)
        if not input_file_request:
            raise ValueError("Input file not found")

        # Parse the JSONL file and process each request
        input_file_path = file_id_storage.get(batch_request.input_file_id)
        with open(input_file_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        total_requests = len(lines)
        completed_requests = 0
        failed_requests = 0

        all_ret = []
        end_point = batch_storage[batch_id].endpoint
        file_request_list = []
        all_requests = []
        request_ids = []
        for line_id, line in enumerate(lines):
            request_data = json.loads(line)
            file_request_list.append(request_data)
            body = request_data["body"]
            request_ids.append(f"{batch_id}-req_{line_id}")

            # Although streaming is supported for standalone completions, it is not supported in
            # batch mode (multiple completions in single request).
            if body.get("stream", False):
                raise ValueError("Streaming requests are not supported in batch mode")

            if end_point == "/v1/chat/completions":
                all_requests.append(ChatCompletionRequest(**body))
            elif end_point == "/v1/completions":
                all_requests.append(CompletionRequest(**body))

        if end_point == "/v1/chat/completions":
            adapted_request, request = v1_chat_generate_request(
                all_requests, tokenizer_manager, request_ids=request_ids
            )
        elif end_point == "/v1/completions":
            adapted_request, request = v1_generate_request(
                all_requests, request_ids=request_ids
            )

        try:
            created = int(time.time())
            ret = await tokenizer_manager.generate_request(adapted_request).__anext__()
            if not isinstance(ret, list):
                ret = [ret]
            if end_point == "/v1/chat/completions":
                responses = v1_chat_generate_response(
                    request,
                    ret,
                    created,
                    to_file=True,
                    cache_report=tokenizer_manager.server_args.enable_cache_report,
                    tool_call_parser=tokenizer_manager.server_args.tool_call_parser,
                )
            else:
                responses = v1_generate_response(
                    request,
                    ret,
                    tokenizer_manager,
                    created,
                    to_file=True,
                    cache_report=tokenizer_manager.server_args.enable_cache_report,
                )

        except Exception as e:
            logger.error(f"error: {get_exception_traceback()}")
            responses = []
            error_json = {
                "id": f"batch_req_{uuid.uuid4()}",
                "custom_id": request_data.get("custom_id"),
                "response": None,
                "error": {"message": str(e)},
            }
            all_ret.append(error_json)
            failed_requests += len(file_request_list)

        for idx, response in enumerate(responses):
            # NOTE: the batch_req id could instead be assigned at batch granularity
            response_json = {
                "id": f"batch_req_{uuid.uuid4()}",
                "custom_id": file_request_list[idx].get("custom_id"),
                "response": response,
                "error": None,
            }
            all_ret.append(response_json)
            completed_requests += 1

        # Write results to a new file
        output_file_id = f"backend_result_file-{uuid.uuid4()}"
        global storage_dir
        output_file_path = os.path.join(storage_dir, f"{output_file_id}.jsonl")
        with open(output_file_path, "w", encoding="utf-8") as f:
            for ret in all_ret:
                f.write(json.dumps(ret) + "\n")

        # Update batch response with output file information
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.output_file_id = output_file_id
        file_id_storage[output_file_id] = output_file_path
        file_id_response[output_file_id] = FileResponse(
            id=output_file_id,
            bytes=os.path.getsize(output_file_path),
            created_at=int(time.time()),
            filename=f"{output_file_id}.jsonl",
            purpose="batch_result",
        )
        # Update batch status to "completed"
        retrieve_batch.status = "completed"
        retrieve_batch.completed_at = int(time.time())
        retrieve_batch.request_counts = {
            "total": total_requests,
            "completed": completed_requests,
            "failed": failed_requests,
        }

    except Exception as e:
        logger.error(f"error: {e}")
        # Update batch status to "failed"
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.status = "failed"
        retrieve_batch.failed_at = int(time.time())
        retrieve_batch.errors = {"message": str(e)}


async def v1_retrieve_batch(batch_id: str):
    # Retrieve the batch job from the in-memory storage
    batch_response = batch_storage.get(batch_id)
    if batch_response is None:
        raise HTTPException(status_code=404, detail="Batch not found")

    return batch_response


async def v1_cancel_batch(tokenizer_manager, batch_id: str):
    # Retrieve the batch job from the in-memory storage
    batch_response = batch_storage.get(batch_id)
    if batch_response is None:
        raise HTTPException(status_code=404, detail="Batch not found")

    # Only cancel when status is "validating" or "in_progress"
    if batch_response.status in ["validating", "in_progress"]:
        # Start cancelling the batch asynchronously
        asyncio.create_task(
            cancel_batch(
                tokenizer_manager=tokenizer_manager,
                batch_id=batch_id,
                input_file_id=batch_response.input_file_id,
            )
        )

        # Update batch status to "cancelling"
        batch_response.status = "cancelling"

        return batch_response
    else:
        raise HTTPException(
            status_code=500,
            detail=f"Current status is {batch_response.status}, no need to cancel",
        )


async def cancel_batch(tokenizer_manager, batch_id: str, input_file_id: str):
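    """Abort every in-flight request belonging to this batch by its request id,
    then mark the batch as cancelled (or failed if cancellation itself errors)."""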
    try:
        # Update the batch status to "cancelling"
        batch_storage[batch_id].status = "cancelling"

        # Retrieve the input file content
        input_file_request = file_id_request.get(input_file_id)
        if not input_file_request:
            raise ValueError("Input file not found")

        # Parse the JSONL file and process each request
        input_file_path = file_id_storage.get(input_file_id)
        with open(input_file_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        # Cancel requests by request_ids
        for line_id in range(len(lines)):
            rid = f"{batch_id}-req_{line_id}"
            tokenizer_manager.abort_request(rid=rid)

        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.status = "cancelled"

    except Exception as e:
        logger.error("error in SGLang:", e)
        # Update batch status to "failed"
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.status = "failed"
        retrieve_batch.failed_at = int(time.time())
        retrieve_batch.errors = {"message": str(e)}


async def v1_retrieve_file(file_id: str):
    # Retrieve the batch job from the in-memory storage
    file_response = file_id_response.get(file_id)
    if file_response is None:
        raise HTTPException(status_code=404, detail="File not found")
    return file_response


async def v1_retrieve_file_content(file_id: str):
    file_pth = file_id_storage.get(file_id)
    if not file_pth or not os.path.exists(file_pth):
        raise HTTPException(status_code=404, detail="File not found")

    def iter_file():
        with open(file_pth, mode="rb") as file_like:
            yield from file_like

    return StreamingResponse(iter_file(), media_type="application/octet-stream")


def v1_generate_request(
    all_requests: List[CompletionRequest], request_ids: List[str] = None
):
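    """Convert one or more OpenAI CompletionRequest objects into a single
    GenerateReqInput for the SRT engine.

    Returns (adapted_request, request), where request is the single original
    request object, or the list of originals when called with a batch."""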
    if len(all_requests) > 1:
        first_prompt_type = type(all_requests[0].prompt)
        for request in all_requests:
            assert (
                type(request.prompt) is first_prompt_type
            ), "All prompts must be of the same type in file input settings"
            if request.n > 1:
                raise ValueError(
                    "Parallel sampling is not supported for completions from files"
                )

    prompts = []
    sampling_params_list = []
    return_logprobs = []
    logprob_start_lens = []
    top_logprobs_nums = []
    lora_paths = []

    for request in all_requests:
        # NOTE: with the OpenAI API, the prompt's logprobs are never computed
        if request.echo and request.logprobs:
            logger.warning(
                "Echo is not compatible with logprobs. "
                "To compute logprobs of input prompt, please use the native /generate API."
            )

        prompt = request.prompt
        if is_completion_template_defined():
            prompt = generate_completion_prompt_from_request(request)
        prompts.append(prompt)

        lora_paths.append(request.lora_path)
        if request.echo and request.logprobs:
            current_logprob_start_len = 0
        else:
            current_logprob_start_len = -1
        sampling_params_list.append(
            {
                "temperature": request.temperature,
                "max_new_tokens": request.max_tokens,
                "min_new_tokens": request.min_tokens,
                "stop": request.stop,
                "stop_token_ids": request.stop_token_ids,
                "top_p": request.top_p,
                "top_k": request.top_k,
                "min_p": request.min_p,
                "presence_penalty": request.presence_penalty,
                "frequency_penalty": request.frequency_penalty,
                "repetition_penalty": request.repetition_penalty,
                "regex": request.regex,
                "json_schema": request.json_schema,
                "ebnf": request.ebnf,
                "n": request.n,
                "no_stop_trim": request.no_stop_trim,
                "ignore_eos": request.ignore_eos,
                "skip_special_tokens": request.skip_special_tokens,
            }
        )
        return_logprobs.append(request.logprobs is not None)
        logprob_start_lens.append(current_logprob_start_len)
        top_logprobs_nums.append(
            request.logprobs if request.logprobs is not None else 0
        )

    if len(all_requests) == 1:
        if isinstance(prompts[0], str) or isinstance(prompts[0][0], str):
            prompt_kwargs = {"text": prompts[0]}
        else:
            prompt_kwargs = {"input_ids": prompts[0]}
        sampling_params_list = sampling_params_list[0]
        return_logprobs = return_logprobs[0]
        logprob_start_lens = logprob_start_lens[0]
        top_logprobs_nums = top_logprobs_nums[0]
        lora_paths = lora_paths[0]
    else:
        if isinstance(prompts[0], str) or isinstance(prompts[0][0], str):
            prompt_kwargs = {"text": prompts}
        else:
            prompt_kwargs = {"input_ids": prompts}

    adapted_request = GenerateReqInput(
        **prompt_kwargs,
        sampling_params=sampling_params_list,
        return_logprob=return_logprobs,
        top_logprobs_num=top_logprobs_nums,
        logprob_start_len=logprob_start_lens,
        return_text_in_logprobs=True,
        stream=all_requests[0].stream,
        rid=request_ids,
        lora_path=lora_paths,
    )

    return adapted_request, all_requests if len(all_requests) > 1 else all_requests[0]


def v1_generate_response(
    request, ret, tokenizer_manager, created, to_file=False, cache_report=False
):
    choices = []
    echo = False

    if (not isinstance(request, list)) and request.echo:
        # TODO: handle the case prompt is token ids
        if isinstance(request.prompt, list) and isinstance(request.prompt[0], str):
            # for the case of multiple str prompts
            prompts = request.prompt
        elif isinstance(request.prompt, list) and isinstance(request.prompt[0], list):
            # for the case of multiple token ids prompts
            prompts = [
                tokenizer_manager.tokenizer.decode(prompt, skip_special_tokens=True)
                for prompt in request.prompt
            ]
        elif isinstance(request.prompt, list) and isinstance(request.prompt[0], int):
            # for the case of single token ids prompt
            prompts = [
                tokenizer_manager.tokenizer.decode(
                    request.prompt, skip_special_tokens=True
                )
            ]
        else:
            # for the case of single str prompt
            prompts = [request.prompt]
        echo = True

    for idx, ret_item in enumerate(ret):
        text = ret_item["text"]
        if isinstance(request, list) and request[idx].echo:
            echo = True
            text = request[idx].prompt + text
        if echo and not isinstance(request, list):
            prompt_index = idx // request.n
            text = prompts[prompt_index] + text

        logprobs = False
        if isinstance(request, list) and request[idx].logprobs is not None:
            logprobs = True
        elif (not isinstance(request, list)) and request.logprobs is not None:
            logprobs = True
        if logprobs:
            if echo:
                input_token_logprobs = ret_item["meta_info"]["input_token_logprobs"]
                input_top_logprobs = ret_item["meta_info"]["input_top_logprobs"]
            else:
                input_token_logprobs = None
                input_top_logprobs = None

            logprobs = to_openai_style_logprobs(
                input_token_logprobs=input_token_logprobs,
                input_top_logprobs=input_top_logprobs,
                output_token_logprobs=ret_item["meta_info"]["output_token_logprobs"],
                output_top_logprobs=ret_item["meta_info"]["output_top_logprobs"],
            )
        else:
            logprobs = None

        finish_reason = ret_item["meta_info"]["finish_reason"]

        if to_file:
            # to make the choice data json serializable
            choice_data = {
                "index": 0,
                "text": text,
                "logprobs": logprobs,
                "finish_reason": finish_reason["type"] if finish_reason else None,
                "matched_stop": (
                    finish_reason["matched"]
                    if finish_reason and "matched" in finish_reason
                    else None
                ),
            }
        else:
            choice_data = CompletionResponseChoice(
                index=idx,
                text=text,
                logprobs=logprobs,
                finish_reason=finish_reason["type"] if finish_reason else None,
                matched_stop=(
                    finish_reason["matched"]
                    if finish_reason and "matched" in finish_reason
                    else None
                ),
            )

        choices.append(choice_data)

    if to_file:
        responses = []
        for i, choice in enumerate(choices):
            response = {
                "status_code": 200,
                "request_id": ret[i]["meta_info"]["id"],
                "body": {
                    # keep the backend request id; adjust here if a separate id scheme is needed
                    "id": ret[i]["meta_info"]["id"],
                    "object": "text_completion",
                    "created": created,
                    "model": request[i].model,
                    "choices": choice,
                    "usage": {
                        "prompt_tokens": ret[i]["meta_info"]["prompt_tokens"],
                        "completion_tokens": ret[i]["meta_info"]["completion_tokens"],
                        "total_tokens": ret[i]["meta_info"]["prompt_tokens"]
                        + ret[i]["meta_info"]["completion_tokens"],
                    },
                    "system_fingerprint": None,
                },
            }
            responses.append(response)
        return responses
    else:
        prompt_tokens = sum(
            ret[i]["meta_info"]["prompt_tokens"] for i in range(0, len(ret), request.n)
        )
        completion_tokens = sum(item["meta_info"]["completion_tokens"] for item in ret)
        cached_tokens = sum(item["meta_info"].get("cached_tokens", 0) for item in ret)
        response = CompletionResponse(
            id=ret[0]["meta_info"]["id"],
            model=request.model,
            created=created,
            choices=choices,
            usage=UsageInfo(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
                prompt_tokens_details=(
                    {"cached_tokens": cached_tokens} if cache_report else None
                ),
            ),
        )
    return response


async def v1_completions(tokenizer_manager, raw_request: Request):
    try:
        request_json = await raw_request.json()
    except Exception as e:
        return create_error_response(f"Invalid request body, error: {e}")
    all_requests = [CompletionRequest(**request_json)]
    created = int(time.time())
    adapted_request, request = v1_generate_request(all_requests)

    if adapted_request.stream:

        async def generate_stream_resp():
            stream_buffers = {}
            n_prev_tokens = {}
            prompt_tokens = {}
            completion_tokens = {}
            cached_tokens = {}

            try:
                async for content in tokenizer_manager.generate_request(
                    adapted_request, raw_request
                ):
                    index = content.get("index", 0)

                    stream_buffer = stream_buffers.get(index, "")
                    n_prev_token = n_prev_tokens.get(index, 0)

                    text = content["text"]
                    prompt_tokens[index] = content["meta_info"]["prompt_tokens"]
                    completion_tokens[index] = content["meta_info"]["completion_tokens"]
                    cached_tokens[index] = content["meta_info"].get("cached_tokens", 0)

                    if not stream_buffer:  # The first chunk
                        if request.echo:
                            if isinstance(request.prompt, str):
                                # for the case of single str prompts
                                prompts = request.prompt
                            elif isinstance(request.prompt, list):
                                if isinstance(request.prompt[0], str):
                                    # for the case of multiple str prompts
                                    prompts = request.prompt[index // request.n]
                                elif isinstance(request.prompt[0], int):
                                    # for the case of single token ids prompt
                                    prompts = tokenizer_manager.tokenizer.decode(
                                        request.prompt, skip_special_tokens=True
                                    )
                                elif isinstance(request.prompt[0], list) and isinstance(
                                    request.prompt[0][0], int
                                ):
                                    # for the case of multiple token ids prompts
                                    prompts = tokenizer_manager.tokenizer.decode(
                                        request.prompt[index // request.n],
                                        skip_special_tokens=True,
                                    )

                            # Prepend prompt in response text.
                            text = prompts + text

                    if request.logprobs is not None:
                        # The first chunk and echo is enabled.
                        if not stream_buffer and request.echo:
                            input_token_logprobs = content["meta_info"][
                                "input_token_logprobs"
                            ]
                            input_top_logprobs = content["meta_info"][
                                "input_top_logprobs"
                            ]
                        else:
                            input_token_logprobs = None
                            input_top_logprobs = None

                        logprobs = to_openai_style_logprobs(
                            input_token_logprobs=input_token_logprobs,
                            input_top_logprobs=input_top_logprobs,
                            output_token_logprobs=content["meta_info"][
                                "output_token_logprobs"
                            ][n_prev_token:],
                            output_top_logprobs=content["meta_info"][
                                "output_top_logprobs"
                            ][n_prev_token:],
                        )
                        n_prev_token = len(
                            content["meta_info"]["output_token_logprobs"]
                        )
                    else:
                        logprobs = None

                    delta = text[len(stream_buffer) :]
                    stream_buffer = stream_buffer + delta
                    finish_reason = content["meta_info"]["finish_reason"]
                    choice_data = CompletionResponseStreamChoice(
                        index=index,
                        text=delta,
                        logprobs=logprobs,
                        finish_reason=finish_reason["type"] if finish_reason else None,
                        matched_stop=(
                            finish_reason["matched"]
                            if finish_reason and "matched" in finish_reason
                            else None
                        ),
                    )
                    chunk = CompletionStreamResponse(
                        id=content["meta_info"]["id"],
                        created=created,
                        object="text_completion",
                        choices=[choice_data],
                        model=request.model,
                    )

                    stream_buffers[index] = stream_buffer
                    n_prev_tokens[index] = n_prev_token

                    yield f"data: {chunk.model_dump_json()}\n\n"
                if request.stream_options and request.stream_options.include_usage:
                    total_prompt_tokens = sum(
                        tokens
                        for i, tokens in prompt_tokens.items()
                        if i % request.n == 0
                    )
                    total_completion_tokens = sum(
                        tokens for tokens in completion_tokens.values()
                    )
                    cache_report = tokenizer_manager.server_args.enable_cache_report
                    if cache_report:
                        cached_tokens_sum = sum(
                            tokens for tokens in cached_tokens.values()
                        )
                        prompt_tokens_details = {"cached_tokens": cached_tokens_sum}
                    else:
                        prompt_tokens_details = None
                    usage = UsageInfo(
                        prompt_tokens=total_prompt_tokens,
                        completion_tokens=total_completion_tokens,
                        total_tokens=total_prompt_tokens + total_completion_tokens,
                        prompt_tokens_details=prompt_tokens_details,
                    )

                    final_usage_chunk = CompletionStreamResponse(
                        id=content["meta_info"]["id"],
                        created=created,
                        choices=[],
                        model=request.model,
                        usage=usage,
                    )
                    final_usage_data = final_usage_chunk.model_dump_json(
                        exclude_none=True
                    )
                    yield f"data: {final_usage_data}\n\n"
            except ValueError as e:
                error = create_streaming_error_response(str(e))
                yield f"data: {error}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream_resp(),
            media_type="text/event-stream",
            background=tokenizer_manager.create_abort_task(adapted_request),
        )

    # Non-streaming response.
    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))

    if not isinstance(ret, list):
        ret = [ret]

    response = v1_generate_response(
        request,
        ret,
        tokenizer_manager,
        created,
        cache_report=tokenizer_manager.server_args.enable_cache_report,
    )
    return response


def _get_enable_thinking_from_request(request_obj):
    """Extracts the 'enable_thinking' flag from request chat_template_kwargs.

    Args:
        request_obj: The request object (or an item from a list of requests).

    Returns:
        The boolean value of 'enable_thinking' if found, otherwise True.
    """
    if (
        hasattr(request_obj, "chat_template_kwargs")
        and request_obj.chat_template_kwargs
        and request_obj.chat_template_kwargs.get("enable_thinking") is not None
    ):
        return request_obj.chat_template_kwargs.get("enable_thinking")
    return True


def v1_chat_generate_request(
    all_requests: List[ChatCompletionRequest],
    tokenizer_manager,
    request_ids: List[str] = None,
):
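    """Convert one or more OpenAI ChatCompletionRequest objects into a single
    GenerateReqInput, applying the chat template, tool schemas, and multimodal
    inputs as needed.

    Returns (adapted_request, request), where request is the single original
    request object, or the list of originals when called with a batch."""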
    input_ids = []
    prompts = []
    sampling_params_list = []
    image_data_list = []
    audio_data_list = []
    return_logprobs = []
    logprob_start_lens = []
    top_logprobs_nums = []
    modalities_list = []
    lora_paths = []

    # NOTE: with the OpenAI API, the prompt's logprobs are never computed

    is_multimodal = tokenizer_manager.model_config.is_multimodal
    for request in all_requests:
        # Prep the data needed for the underlying GenerateReqInput:
        #  - prompt: The full prompt string.
        #  - stop: Custom stop tokens.
        #  - image_data: None or a list of image strings (URLs or base64 strings).
        #  - audio_data: None or a list of audio strings (URLs).
        #    None skips any image processing in GenerateReqInput.
        strict_tag = None
        prompt = ""
        prompt_ids = []
        if not isinstance(request.messages, str):
            # Apply chat template and its stop strings.
            tools = None
            if request.tools and request.tool_choice != "none":
                request.skip_special_tokens = False
                if not isinstance(request.tool_choice, str):
                    tools = [
                        item.function.model_dump()
                        for item in request.tools
                        if item.function.name == request.tool_choice.function.name
                    ]
                else:
                    tools = [item.function.model_dump() for item in request.tools]

                tool_call_parser = tokenizer_manager.server_args.tool_call_parser
                parser = FunctionCallParser(request.tools, tool_call_parser)
                strict_tag = parser.get_structure_tag()

            if chat_template_name is None:
                openai_compatible_messages = []

                for message in request.messages:
                    if message.content is None:
                        message.content = ""
                    msg_dict = message.dict()
                    if isinstance(msg_dict.get("content"), list):
                        for chunk in msg_dict["content"]:
                            if isinstance(chunk, dict) and chunk.get("type") == "text":
                                new_msg = msg_dict.copy()
                                new_msg["content"] = chunk["text"]
                                new_msg = {
                                    k: v for k, v in new_msg.items() if v is not None
                                }
                                openai_compatible_messages.append(new_msg)
                    else:
                        msg_dict = {k: v for k, v in msg_dict.items() if v is not None}
                        openai_compatible_messages.append(msg_dict)
                if (
                    openai_compatible_messages
                    and openai_compatible_messages[-1]["role"] == "assistant"
                ):
                    if request.continue_final_message:
                        # Remove the final assistant message so its content can be continued.
                        assistant_prefix = openai_compatible_messages[-1]["content"]
                        openai_compatible_messages = openai_compatible_messages[:-1]
                    else:
                        assistant_prefix = None
                else:
                    assistant_prefix = None

                try:
                    prompt_ids = tokenizer_manager.tokenizer.apply_chat_template(
                        openai_compatible_messages,
                        tokenize=True,
                        add_generation_prompt=True,
                        tools=tools,
                        **(
                            request.chat_template_kwargs
                            if request.chat_template_kwargs
                            else {}
                        ),
                    )
                except Exception:
                    #  This except branch will be triggered when the chosen model
                    #  has a different tools input format that is not compatible
                    #  with openAI's apply_chat_template tool_call format, like Mistral.
                    tools = [t if "function" in t else {"function": t} for t in tools]
                    prompt_ids = tokenizer_manager.tokenizer.apply_chat_template(
                        openai_compatible_messages,
                        tokenize=True,
                        add_generation_prompt=True,
                        tools=tools,
                        **(
                            request.chat_template_kwargs
                            if request.chat_template_kwargs
                            else {}
                        ),
                    )

                if assistant_prefix:
                    encoded = tokenizer_manager.tokenizer.encode(assistant_prefix)
                    if (
                        encoded
                        and encoded[0] == tokenizer_manager.tokenizer.bos_token_id
                    ):
                        encoded = encoded[1:]
                    prompt_ids += encoded
                if is_multimodal:
                    prompt = tokenizer_manager.tokenizer.decode(prompt_ids)
                stop = request.stop
                image_data = None
                audio_data = None
                modalities = []
            else:
                conv = generate_chat_conv(request, chat_template_name)
                # If we should continue the final assistant message, adjust the conversation.
                if (
                    request.continue_final_message
                    and request.messages
                    and request.messages[-1].role == "assistant"
                ):
                    # Remove the auto-added blank assistant turn, if present.
                    if conv.messages and conv.messages[-1][1] is None:
                        conv.messages.pop()
                    # Rebuild the prompt from the conversation.
                    prompt = conv.get_prompt()
                    # Strip any trailing stop tokens or separators that indicate end-of-assistant.
                    if isinstance(conv.stop_str, list):
                        for stop_token in conv.stop_str:
                            if prompt.endswith(stop_token):
                                prompt = prompt[: -len(stop_token)]
                    elif isinstance(conv.stop_str, str) and prompt.endswith(
                        conv.stop_str
                    ):
                        prompt = prompt[: -len(conv.stop_str)]
                    if conv.sep and prompt.endswith(conv.sep):
                        prompt = prompt[: -len(conv.sep)]
                    if getattr(conv, "sep2", None) and prompt.endswith(conv.sep2):
                        prompt = prompt[: -len(conv.sep2)]
                else:
                    prompt = conv.get_prompt()

                image_data = conv.image_data
                audio_data = conv.audio_data
                modalities = conv.modalities
                stop = conv.stop_str or [] if not request.ignore_eos else []

                if request.stop:
                    if isinstance(request.stop, str):
                        stop.append(request.stop)
                    else:
                        stop.extend(request.stop)

                if not is_multimodal:
                    prompt_ids = tokenizer_manager.tokenizer.encode(prompt)
        else:
            # Use the raw prompt and stop strings if the messages field is already a string.
            prompt_ids = request.messages
            stop = request.stop
            image_data = None
            audio_data = None
            modalities = []
            prompt = request.messages
        input_ids.append(prompt_ids)
        return_logprobs.append(request.logprobs)
        logprob_start_lens.append(-1)
        top_logprobs_nums.append(request.top_logprobs or 0)
        lora_paths.append(request.lora_path)
        prompts.append(prompt)

        sampling_params = {
            "temperature": request.temperature,
            "max_new_tokens": request.max_tokens or request.max_completion_tokens,
            "min_new_tokens": request.min_tokens,
            "stop": stop,
            "stop_token_ids": request.stop_token_ids,
            "top_p": request.top_p,
            "top_k": request.top_k,
            "min_p": request.min_p,
            "presence_penalty": request.presence_penalty,
            "frequency_penalty": request.frequency_penalty,
            "repetition_penalty": request.repetition_penalty,
            "regex": request.regex,
            "ebnf": request.ebnf,
            "n": request.n,
            "no_stop_trim": request.no_stop_trim,
            "ignore_eos": request.ignore_eos,
            "skip_special_tokens": request.skip_special_tokens,
        }

        if request.response_format and request.response_format.type == "json_schema":
            sampling_params["json_schema"] = convert_json_schema_to_str(
                request.response_format.json_schema.schema_
            )
        elif request.response_format and request.response_format.type == "json_object":
            sampling_params["json_schema"] = '{"type": "object"}'
        elif (
            request.response_format and request.response_format.type == "structural_tag"
        ):
            sampling_params["structural_tag"] = convert_json_schema_to_str(
                request.response_format.model_dump(by_alias=True)
            )

        if strict_tag is not None:
            if (
                sampling_params.get("regex")
                or sampling_params.get("ebnf")
                or sampling_params.get("structural_tag")
                or sampling_params.get("json_schema")
            ):
                logger.warning(
                    "Constrained decoding is not compatible with tool calls."
                )
            else:
                sampling_params["structural_tag"] = convert_json_schema_to_str(
                    strict_tag.model_dump(by_alias=True)
                )

        sampling_params_list.append(sampling_params)

        image_data_list.append(image_data)
        audio_data_list.append(audio_data)
        modalities_list.append(modalities)
    if len(all_requests) == 1:
        if is_multimodal:
            # processor will need text input
            prompt_kwargs = {"text": prompts[0]}
        else:
            if isinstance(input_ids[0], str):
                prompt_kwargs = {"text": input_ids[0]}
            else:
                prompt_kwargs = {"input_ids": input_ids[0]}
        sampling_params_list = sampling_params_list[0]
        image_data_list = image_data_list[0]
        audio_data_list = audio_data_list[0]
        return_logprobs = return_logprobs[0]
        logprob_start_lens = logprob_start_lens[0]
        top_logprobs_nums = top_logprobs_nums[0]
        modalities_list = modalities_list[0]
        lora_paths = lora_paths[0]
        request_ids = request_ids[0]
    else:
        if tokenizer_manager.model_config.is_multimodal:
            # processor will need text input
            prompt_kwargs = {"text": prompts}
        else:
            if isinstance(input_ids[0], str):
                prompt_kwargs = {"text": input_ids}
            else:
                prompt_kwargs = {"input_ids": input_ids}

    adapted_request = GenerateReqInput(
        **prompt_kwargs,
        image_data=image_data_list,
        audio_data=audio_data_list,
        sampling_params=sampling_params_list,
        return_logprob=return_logprobs,
        logprob_start_len=logprob_start_lens,
        top_logprobs_num=top_logprobs_nums,
        stream=all_requests[0].stream,
        return_text_in_logprobs=True,
        rid=request_ids,
        modalities=modalities_list,
        lora_path=lora_paths,
        bootstrap_host=all_requests[0].bootstrap_host,
        bootstrap_port=all_requests[0].bootstrap_port,
        bootstrap_room=all_requests[0].bootstrap_room,
    )

    return adapted_request, all_requests if len(all_requests) > 1 else all_requests[0]


def v1_chat_generate_response(
    request,
    ret,
    created,
    to_file=False,
    cache_report=False,
    tool_call_parser=None,
    reasoning_parser=None,
):
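    """Build OpenAI ChatCompletion response objects from engine outputs, handling
    logprobs, optional reasoning separation, and tool-call parsing."""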
    choices = []

    for idx, ret_item in enumerate(ret):
        logprobs = False
        if isinstance(request, list) and request[idx].logprobs:
            logprobs = True
        elif (not isinstance(request, list)) and request.logprobs:
            logprobs = True
        if logprobs:
            logprobs = to_openai_style_logprobs(
                output_token_logprobs=ret_item["meta_info"]["output_token_logprobs"],
                output_top_logprobs=ret_item["meta_info"].get(
                    "output_top_logprobs", None
                ),
            )
            token_logprobs = []
            for token_idx, (token, logprob) in enumerate(
                zip(logprobs.tokens, logprobs.token_logprobs)
            ):
                token_bytes = list(token.encode("utf-8"))
                top_logprobs = []
                if logprobs.top_logprobs:
                    for top_token, top_logprob in logprobs.top_logprobs[
                        token_idx
                    ].items():
                        top_token_bytes = list(top_token.encode("utf-8"))
                        top_logprobs.append(
                            TopLogprob(
                                token=top_token,
                                bytes=top_token_bytes,
                                logprob=top_logprob,
                            )
                        )
                token_logprobs.append(
                    ChatCompletionTokenLogprob(
                        token=token,
                        bytes=token_bytes,
                        logprob=logprob,
                        top_logprobs=top_logprobs,
                    )
                )

            choice_logprobs = ChoiceLogprobs(content=token_logprobs)
        else:
            choice_logprobs = None

        finish_reason = ret_item["meta_info"]["finish_reason"]

        tool_calls = None
        text = ret_item["text"]

        if isinstance(request, list):
            tool_choice = request[idx].tool_choice
            tools = request[idx].tools
            separate_reasoning = request[idx].separate_reasoning
            enable_thinking = _get_enable_thinking_from_request(request[idx])
        else:
            tool_choice = request.tool_choice
            tools = request.tools
            separate_reasoning = request.separate_reasoning
            enable_thinking = _get_enable_thinking_from_request(request)

        reasoning_text = None
        if reasoning_parser and separate_reasoning and enable_thinking:
            try:
                parser = ReasoningParser(
                    model_type=reasoning_parser, stream_reasoning=False
                )
                reasoning_text, text = parser.parse_non_stream(text)
            except Exception as e:
                logger.error(f"Exception: {e}")
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "Failed to parse reasoning related info to json format!",
                )

        if tool_choice != "none" and tools:
            parser = FunctionCallParser(tools, tool_call_parser)
            if parser.has_tool_call(text):
                if finish_reason["type"] == "stop":
                    finish_reason["type"] = "tool_calls"
                    finish_reason["matched"] = None
                try:
                    text, call_info_list = parser.parse_non_stream(text)
                    tool_calls = [
                        ToolCall(
                            id=f"call_{base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=').decode()}",
                            index=call_info.tool_index,
                            function=FunctionResponse(
                                name=call_info.name, arguments=call_info.parameters
                            ),
                        )
                        for call_info in call_info_list
                    ]
                except Exception as e:
                    logger.error(f"Exception: {e}")
                    return create_error_response(
                        HTTPStatus.BAD_REQUEST,
                        "Failed to parse fc related info to json format!",
                    )

        if to_file:
            # to make the choice data json serializable
            choice_data = {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": text if text else None,
                    "tool_calls": tool_calls,
                    "reasoning_content": reasoning_text if reasoning_text else None,
                },
                "logprobs": choice_logprobs.model_dump() if choice_logprobs else None,
                "finish_reason": finish_reason["type"] if finish_reason else None,
                "matched_stop": (
                    finish_reason["matched"]
                    if finish_reason and "matched" in finish_reason
                    else None
                ),
            }
        else:
            choice_data = ChatCompletionResponseChoice(
                index=idx,
                message=ChatMessage(
                    role="assistant",
                    content=text if text else None,
                    tool_calls=tool_calls,
                    reasoning_content=reasoning_text if reasoning_text else None,
                ),
                logprobs=choice_logprobs,
                finish_reason=finish_reason["type"] if finish_reason else None,
                matched_stop=(
                    finish_reason["matched"]
                    if finish_reason and "matched" in finish_reason
                    else None
                ),
            )

        choices.append(choice_data)

    if to_file:
        responses = []

        for i, choice in enumerate(choices):
            response = {
                "status_code": 200,
                "request_id": ret[i]["meta_info"]["id"],
                "body": {
                    # reuse the generation request id as the response id; change if needed
                    "id": ret[i]["meta_info"]["id"],
                    "object": "chat.completion",
                    "created": created,
                    "model": request[i].model,
                    "choices": choice,
                    "usage": {
                        "prompt_tokens": ret[i]["meta_info"]["prompt_tokens"],
                        "completion_tokens": ret[i]["meta_info"]["completion_tokens"],
                        "total_tokens": ret[i]["meta_info"]["prompt_tokens"]
                        + ret[i]["meta_info"]["completion_tokens"],
                    },
                    "system_fingerprint": None,
                },
            }
            responses.append(response)
        return responses
    else:
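        # The n parallel samples of a request share one prompt, so count its
        # prompt tokens only once (every request.n-th entry of ret).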
        prompt_tokens = sum(
            ret[i]["meta_info"]["prompt_tokens"] for i in range(0, len(ret), request.n)
        )
        completion_tokens = sum(item["meta_info"]["completion_tokens"] for item in ret)
        cached_tokens = sum(item["meta_info"].get("cached_tokens", 0) for item in ret)
        response = ChatCompletionResponse(
            id=ret[0]["meta_info"]["id"],
            created=created,
            model=request.model,
            choices=choices,
            usage=UsageInfo(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
                prompt_tokens_details=(
                    {"cached_tokens": cached_tokens} if cache_report else None
                ),
            ),
        )
        return response


async def v1_chat_completions(
    tokenizer_manager, raw_request: Request, cache_report=False
):
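    """Handle an OpenAI-compatible `/v1/chat/completions` request.

    The JSON body follows the OpenAI chat API, e.g. (model name illustrative):

        {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "messages": [{"role": "user", "content": "Hello!"}],
            "stream": false
        }

    Streaming requests are answered with an SSE `StreamingResponse`; otherwise a
    `ChatCompletionResponse` is returned.
    """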
    try:
        request_json = await raw_request.json()
    except Exception as e:
        return create_error_response("Invalid request body, error: ", str(e))
    all_requests = [ChatCompletionRequest(**request_json)]
    created = int(time.time())
    adapted_request, request = v1_chat_generate_request(
        all_requests, tokenizer_manager, request_ids=[all_requests[0].rid]
    )

    if adapted_request.stream:
        parser_dict = {}
        reasoning_parser_dict = {}

        async def generate_stream_resp():
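            # Emit OpenAI-style SSE chunks. The per-index dicts below track
            # streaming state separately for each of the n parallel choices.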
            tool_call_first = True
            is_firsts = {}
            stream_buffers = {}
            n_prev_tokens = {}
            prompt_tokens = {}
            completion_tokens = {}
            cached_tokens = {}
            try:
                async for content in tokenizer_manager.generate_request(
                    adapted_request, raw_request
                ):
                    index = content.get("index", 0)
                    text = content["text"]

                    is_first = is_firsts.get(index, True)
                    stream_buffer = stream_buffers.get(index, "")
                    n_prev_token = n_prev_tokens.get(index, 0)

                    prompt_tokens[index] = content["meta_info"]["prompt_tokens"]
                    completion_tokens[index] = content["meta_info"]["completion_tokens"]
                    cached_tokens[index] = content["meta_info"].get("cached_tokens", 0)
                    if request.logprobs:
                        logprobs = to_openai_style_logprobs(
                            output_token_logprobs=content["meta_info"][
                                "output_token_logprobs"
                            ][n_prev_token:],
                            output_top_logprobs=content["meta_info"].get(
                                "output_top_logprobs", []
                            )[n_prev_token:],
                        )

                        n_prev_token = len(
                            content["meta_info"]["output_token_logprobs"]
                        )
                        token_logprobs = []
                        for token, logprob in zip(
                            logprobs.tokens, logprobs.token_logprobs
                        ):
                            token_bytes = list(token.encode("utf-8"))
                            top_logprobs = []
                            if logprobs.top_logprobs:
                                for top_token, top_logprob in logprobs.top_logprobs[
                                    0
                                ].items():
                                    top_token_bytes = list(top_token.encode("utf-8"))
                                    top_logprobs.append(
                                        TopLogprob(
                                            token=top_token,
                                            bytes=top_token_bytes,
                                            logprob=top_logprob,
                                        )
                                    )
                            token_logprobs.append(
                                ChatCompletionTokenLogprob(
                                    token=token,
                                    bytes=token_bytes,
                                    logprob=logprob,
                                    top_logprobs=top_logprobs,
                                )
                            )

                        choice_logprobs = ChoiceLogprobs(content=token_logprobs)

                    else:
                        choice_logprobs = None

                    finish_reason = content["meta_info"]["finish_reason"]
                    finish_reason_type = (
                        finish_reason["type"] if finish_reason else None
                    )

                    if is_first:
                        # First chunk with role
                        is_first = False
                        delta = DeltaMessage(role="assistant")
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=index,
                            delta=delta,
                            finish_reason=finish_reason_type,
                            matched_stop=(
                                finish_reason["matched"]
                                if finish_reason and "matched" in finish_reason
                                else None
                            ),
                            logprobs=choice_logprobs,
                        )
                        chunk = ChatCompletionStreamResponse(
                            id=content["meta_info"]["id"],
                            created=created,
                            choices=[choice_data],
                            model=request.model,
                        )
                        yield f"data: {chunk.model_dump_json()}\n\n"

                    text = content["text"]
                    delta = text[len(stream_buffer) :]
                    new_stream_buffer = stream_buffer + delta

                    enable_thinking = _get_enable_thinking_from_request(request)

                    if (
                        tokenizer_manager.server_args.reasoning_parser
                        and request.separate_reasoning
                        and enable_thinking
                    ):
                        if index not in reasoning_parser_dict:
                            reasoning_parser_dict[index] = ReasoningParser(
                                tokenizer_manager.server_args.reasoning_parser,
                                request.stream_reasoning,
                            )
                        reasoning_parser = reasoning_parser_dict[index]
                        reasoning_text, delta = reasoning_parser.parse_stream_chunk(
                            delta
                        )
                        if reasoning_text:
                            choice_data = ChatCompletionResponseStreamChoice(
                                index=index,
                                delta=DeltaMessage(
                                    reasoning_content=(
                                        reasoning_text if reasoning_text else None
                                    )
                                ),
                                finish_reason=finish_reason_type,
                            )
                            chunk = ChatCompletionStreamResponse(
                                id=content["meta_info"]["id"],
                                created=created,
                                choices=[choice_data],
                                model=request.model,
                            )
                            yield f"data: {chunk.model_dump_json()}\n\n"
                        if not delta:
                            stream_buffers[index] = new_stream_buffer
                            is_firsts[index] = is_first
                            continue

                    if request.tool_choice != "none" and request.tools:
                        if index not in parser_dict:
                            parser_dict[index] = FunctionCallParser(
                                tools=request.tools,
                                tool_call_parser=tokenizer_manager.server_args.tool_call_parser,
                            )
                        parser = parser_dict[index]

                        # parse_increment => returns (normal_text, calls)
                        normal_text, calls = parser.parse_stream_chunk(delta)

                        # 1) if there's normal_text, output it as normal content
                        if normal_text:
                            choice_data = ChatCompletionResponseStreamChoice(
                                index=index,
                                delta=DeltaMessage(
                                    content=normal_text if normal_text else None
                                ),
                                finish_reason=finish_reason_type,
                            )
                            chunk = ChatCompletionStreamResponse(
                                id=content["meta_info"]["id"],
                                created=created,
                                choices=[choice_data],
                                model=request.model,
                            )
                            yield f"data: {chunk.model_dump_json()}\n\n"

                        # 2) if we found calls, we output them as separate chunk(s)
                        for call_item in calls:
                            # transform call_item -> FunctionResponse + ToolCall
                            if finish_reason_type == "stop":
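                                # The stream is ending, so flush whatever part of the
                                # tool-call arguments has not been streamed yet by
                                # diffing the expected JSON against what was sent.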
                                latest_delta_len = 0
                                if isinstance(call_item.parameters, str):
                                    latest_delta_len = len(call_item.parameters)

                                expected_call = json.dumps(
                                    parser.multi_format_parser.detectors[0]
                                    .prev_tool_call_arr[index]
                                    .get("arguments", {}),
                                    ensure_ascii=False,
                                )
                                actual_call = parser.multi_format_parser.detectors[
                                    0
                                ].streamed_args_for_tool[index]
                                if latest_delta_len > 0:
                                    actual_call = actual_call[:-latest_delta_len]
                                remaining_call = expected_call.replace(
                                    actual_call, "", 1
                                )
                                call_item.parameters = remaining_call

                                finish_reason_type = "tool_calls"
                            tool_call = ToolCall(
                                id=(
                                    f"call_{base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=').decode()}"
                                    if tool_call_first
                                    else None
                                ),
                                index=call_item.tool_index,
                                function=FunctionResponse(
                                    name=call_item.name,
                                    arguments=call_item.parameters,
                                ),
                            )
                            tool_call_first = False
                            choice_data = ChatCompletionResponseStreamChoice(
                                index=index,
                                delta=DeltaMessage(tool_calls=[tool_call]),
                                finish_reason=(
                                    None
                                    if request.stream_options
                                    and request.stream_options.include_usage
                                    else finish_reason_type
                                ),  # an additional chunk will be returned
                            )
                            chunk = ChatCompletionStreamResponse(
                                id=content["meta_info"]["id"],
                                created=created,
                                choices=[choice_data],
                                model=request.model,
                            )
                            yield f"data: {chunk.model_dump_json()}\n\n"

                        stream_buffers[index] = new_stream_buffer
                        is_firsts[index] = is_first

                    else:
                        # No tool calls => just treat this as normal text
                        if delta or not (
                            request.stream_options
                            and request.stream_options.include_usage
                        ):
                            choice_data = ChatCompletionResponseStreamChoice(
                                index=index,
                                delta=DeltaMessage(content=delta if delta else None),
                                finish_reason=(
                                    None
                                    if request.stream_options
                                    and request.stream_options.include_usage
                                    else finish_reason_type
                                ),
                                matched_stop=(
                                    finish_reason["matched"]
                                    if finish_reason and "matched" in finish_reason
                                    else None
                                ),
                                logprobs=choice_logprobs,
                            )
                            chunk = ChatCompletionStreamResponse(
                                id=content["meta_info"]["id"],
                                created=created,
                                choices=[choice_data],
                                model=request.model,
                            )
                            yield f"data: {chunk.model_dump_json()}\n\n"
                            stream_buffers[index] = new_stream_buffer
                            is_firsts[index] = is_first
                if finish_reason_type == "stop" and request.tool_choice != "none":
                    parser = FunctionCallParser(
                        tools=request.tools,
                        tool_call_parser=tokenizer_manager.server_args.tool_call_parser,
                    )
                    if parser.has_tool_call(new_stream_buffer):
                        # if the stream ends with empty string after tool calls
                        finish_reason_type = "tool_calls"

                if request.stream_options and request.stream_options.include_usage:
                    total_prompt_tokens = sum(
                        tokens
                        for i, tokens in prompt_tokens.items()
                        if i % request.n == 0
                    )
                    total_completion_tokens = sum(
                        tokens for tokens in completion_tokens.values()
                    )
                    cache_report = tokenizer_manager.server_args.enable_cache_report
                    if cache_report:
                        cached_tokens_sum = sum(
                            tokens for tokens in cached_tokens.values()
                        )
                        prompt_tokens_details = {"cached_tokens": cached_tokens_sum}
                    else:
                        prompt_tokens_details = None
                    usage = UsageInfo(
                        prompt_tokens=total_prompt_tokens,
                        completion_tokens=total_completion_tokens,
                        total_tokens=total_prompt_tokens + total_completion_tokens,
                        prompt_tokens_details=prompt_tokens_details,
                    )

                else:
                    usage = None
                final_usage_chunk = ChatCompletionStreamResponse(
                    id=content["meta_info"]["id"],
                    created=created,
                    choices=[
                        ChatCompletionResponseStreamChoice(
                            index=index,
                            delta=DeltaMessage(),
                            finish_reason=finish_reason_type,
                        )
                    ],
                    model=request.model,
                    usage=usage,
                )
                yield f"data: {final_usage_chunk.model_dump_json()}\n\n"
            except ValueError as e:
                error = create_streaming_error_response(str(e))
                yield f"data: {error}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream_resp(),
            media_type="text/event-stream",
            background=tokenizer_manager.create_abort_task(adapted_request),
        )

    # Non-streaming response.
    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))
    if not isinstance(ret, list):
        ret = [ret]

    response = v1_chat_generate_response(
        request,
        ret,
        created,
        cache_report=tokenizer_manager.server_args.enable_cache_report,
        tool_call_parser=tokenizer_manager.server_args.tool_call_parser,
        reasoning_parser=tokenizer_manager.server_args.reasoning_parser,
    )

    return response


def v1_embedding_request(all_requests, tokenizer_manager):
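    """Convert OpenAI-style embedding request(s) into an `EmbeddingReqInput`.

    Accepts plain strings, token-id lists, or `MultimodalEmbeddingInput` items;
    all requests in one call must use the same input type.
    """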
    prompts = []
    sampling_params_list = []
    first_prompt_type = type(all_requests[0].input)

    for request in all_requests:
        # Check for empty/whitespace string
        prompt = _validate_prompt(request.input)
        assert (
            type(prompt) is first_prompt_type
        ), "All prompts must be of the same type in file input settings"
        prompts.append(prompt)

    if len(all_requests) == 1:
        prompt = prompts[0]
        if isinstance(prompt, str) or isinstance(prompt[0], str):
            prompt_kwargs = {"text": prompt}
        elif isinstance(prompt, list) and isinstance(
            prompt[0], MultimodalEmbeddingInput
        ):
            texts = []
            images = []
            for item in prompt:
                # TODO: "padding" is a placeholder for missing text; handle this better
                texts.append(item.text if item.text is not None else "padding")
                images.append(item.image if item.image is not None else None)
            generate_prompts = []
            if chat_template_name is not None:
                convs = generate_embedding_convs(texts, images, chat_template_name)
                for conv in convs:
                    generate_prompts.append(conv.get_prompt())
            else:
                generate_prompts = texts
            if len(generate_prompts) == 1:
                prompt_kwargs = {"text": generate_prompts[0], "image_data": images[0]}
            else:
                prompt_kwargs = {"text": generate_prompts, "image_data": images}
        else:
            prompt_kwargs = {"input_ids": prompt}
    else:
        if isinstance(prompts[0], str) or isinstance(prompts[0][0], str):
            prompt_kwargs = {"text": prompts}
        elif isinstance(prompts[0], list) and isinstance(
            prompts[0][0], MultimodalEmbeddingInput
        ):
            # TODO: multiple requests
            raise NotImplementedError(
                "Multiple requests with multimodal inputs are not supported yet"
            )
        else:
            prompt_kwargs = {"input_ids": prompts}

    adapted_request = EmbeddingReqInput(
        **prompt_kwargs,
    )

    if len(all_requests) == 1:
        return adapted_request, all_requests[0]
    return adapted_request, all_requests


def v1_embedding_response(ret, model_path, to_file=False):
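    """Wrap SRT embedding outputs into an OpenAI-style `EmbeddingResponse`."""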
    embedding_objects = []
    prompt_tokens = 0
    for idx, ret_item in enumerate(ret):
        embedding_objects.append(
            EmbeddingObject(
                embedding=ret[idx]["embedding"],
                index=idx,
            )
        )
        prompt_tokens += ret[idx]["meta_info"]["prompt_tokens"]

    return EmbeddingResponse(
        data=embedding_objects,
        model=model_path,
        usage=UsageInfo(
            prompt_tokens=prompt_tokens,
            total_tokens=prompt_tokens,
        ),
    )


async def v1_embeddings(tokenizer_manager, raw_request: Request):
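    """Handle an OpenAI-compatible `/v1/embeddings` request.

    The JSON body follows the OpenAI embeddings API, e.g. (model name illustrative):

        {"model": "intfloat/e5-mistral-7b-instruct", "input": "Hello world"}
    """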
    try:
        request_json = await raw_request.json()
    except Exception as e:
        return create_error_response("Invalid request body, error: ", str(e))
    all_requests = [EmbeddingRequest(**request_json)]
    adapted_request, request = v1_embedding_request(all_requests, tokenizer_manager)

    try:
        ret = await tokenizer_manager.generate_request(
            adapted_request, raw_request
        ).__anext__()
    except ValueError as e:
        return create_error_response(str(e))

    if not isinstance(ret, list):
        ret = [ret]

    response = v1_embedding_response(ret, tokenizer_manager.model_path)

    return response


def to_openai_style_logprobs(
    input_token_logprobs=None,
    output_token_logprobs=None,
    input_top_logprobs=None,
    output_top_logprobs=None,
):
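    """Convert SRT logprob tuples into an OpenAI-style `LogProbs` object.

    Token-level entries are tuples whose first element is the logprob and last
    element is the token text; top-logprob entries are lists of such tuples
    (or None) per position.
    """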
    ret_logprobs = LogProbs()

    def append_token_logprobs(token_logprobs):
        for logprob, _, token_text in token_logprobs:
            ret_logprobs.tokens.append(token_text)
            ret_logprobs.token_logprobs.append(logprob)

            # Not supported yet
            ret_logprobs.text_offset.append(-1)

    def append_top_logprobs(top_logprobs):
        for tokens in top_logprobs:
            if tokens is not None:
                ret_logprobs.top_logprobs.append(
                    {token[2]: token[0] for token in tokens}
                )
            else:
                ret_logprobs.top_logprobs.append(None)

    if input_token_logprobs is not None:
        append_token_logprobs(input_token_logprobs)
    if output_token_logprobs is not None:
        append_token_logprobs(output_token_logprobs)
    if input_top_logprobs is not None:
        append_top_logprobs(input_top_logprobs)
    if output_top_logprobs is not None:
        append_top_logprobs(output_top_logprobs)

    return ret_logprobs