# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
import time
from typing import Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field

from vllm.utils import random_uuid
from vllm.sampling_params import SamplingParams


class ErrorResponse(BaseModel):
    object: str = "error"
    message: str
    type: str
    param: Optional[str] = None
    code: int
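
# A minimal usage sketch (not part of the original file); the message, type,
# and code values below are hypothetical:
#
#   error = ErrorResponse(
#       message="The model 'foo' does not exist.",
#       type="invalid_request_error",
#       code=404,
#   )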


class ModelPermission(BaseModel):
    id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}")
    object: str = "model_permission"
    created: int = Field(default_factory=lambda: int(time.time()))
    allow_create_engine: bool = False
    allow_sampling: bool = True
    allow_logprobs: bool = True
    allow_search_indices: bool = False
    allow_view: bool = True
    allow_fine_tuning: bool = False
    organization: str = "*"
    group: Optional[str] = None
    is_blocking: bool = False


class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "vllm"
    root: Optional[str] = None
    parent: Optional[str] = None
    permission: List[ModelPermission] = Field(default_factory=list)


class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = Field(default_factory=list)
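
# Illustrative sketch (an assumption, not from the original file): a server
# advertising a single model; "my-model" is a placeholder id.
#
#   card = ModelCard(id="my-model", permission=[ModelPermission()])
#   models = ModelList(data=[card])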


class UsageInfo(BaseModel):
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: Optional[int] = 0


class ChatCompletionRequest(BaseModel):
    model: str
    messages: Union[str, List[Dict[str, str]]]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    max_tokens: Optional[int] = None
    stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
    stream: Optional[bool] = False
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None
    # Additional parameters supported by vLLM
    best_of: Optional[int] = None
    top_k: Optional[int] = -1
    ignore_eos: Optional[bool] = False
    use_beam_search: Optional[bool] = False
    stop_token_ids: Optional[List[int]] = Field(default_factory=list)
    skip_special_tokens: Optional[bool] = True
    spaces_between_special_tokens: Optional[bool] = True
    add_generation_prompt: Optional[bool] = True
    echo: Optional[bool] = False
    repetition_penalty: Optional[float] = 1.0
    min_p: Optional[float] = 0.0
    include_stop_str_in_output: Optional[bool] = False
    length_penalty: Optional[float] = 1.0

    def to_sampling_params(self) -> SamplingParams:
        return SamplingParams(
            n=self.n,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=self.repetition_penalty,
            temperature=self.temperature,
            top_p=self.top_p,
            min_p=self.min_p,
            stop=self.stop,
            stop_token_ids=self.stop_token_ids,
            max_tokens=self.max_tokens,
            best_of=self.best_of,
            top_k=self.top_k,
            ignore_eos=self.ignore_eos,
            use_beam_search=self.use_beam_search,
            skip_special_tokens=self.skip_special_tokens,
            spaces_between_special_tokens=self.spaces_between_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
            length_penalty=self.length_penalty,
        )
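
# Hedged example (request values are hypothetical, not from the original
# file): any OpenAI-style chat payload converts to SamplingParams this way.
#
#   request = ChatCompletionRequest(
#       model="my-model",
#       messages=[{"role": "user", "content": "Hello!"}],
#       temperature=0.5,
#       max_tokens=32,
#   )
#   sampling_params = request.to_sampling_params()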


class CompletionRequest(BaseModel):
    model: str
    # a string, array of strings, array of tokens, or array of token arrays
    prompt: Union[List[int], List[List[int]], str, List[str]]
    suffix: Optional[str] = None
    max_tokens: Optional[int] = 16
    temperature: Optional[float] = 1.0
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stream: Optional[bool] = False
    logprobs: Optional[int] = None
    echo: Optional[bool] = False
    stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    best_of: Optional[int] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None
    # Additional parameters supported by vLLM
    top_k: Optional[int] = -1
    ignore_eos: Optional[bool] = False
    use_beam_search: Optional[bool] = False
    stop_token_ids: Optional[List[int]] = Field(default_factory=list)
    skip_special_tokens: Optional[bool] = True
    spaces_between_special_tokens: Optional[bool] = True
    repetition_penalty: Optional[float] = 1.0
    min_p: Optional[float] = 0.0
    include_stop_str_in_output: Optional[bool] = False
    length_penalty: Optional[float] = 1.0

    def to_sampling_params(self) -> SamplingParams:
        echo_without_generation = self.echo and self.max_tokens == 0

        return SamplingParams(
            n=self.n,
            best_of=self.best_of,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=self.repetition_penalty,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            min_p=self.min_p,
            stop=self.stop,
            stop_token_ids=self.stop_token_ids,
            ignore_eos=self.ignore_eos,
            max_tokens=self.max_tokens if not echo_without_generation else 1,
            logprobs=self.logprobs,
            use_beam_search=self.use_beam_search,
            prompt_logprobs=self.logprobs if self.echo else None,
            skip_special_tokens=self.skip_special_tokens,
            spaces_between_special_tokens=self.spaces_between_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
            length_penalty=self.length_penalty,
        )
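
# Illustrative sketch (hypothetical values): with echo=True and max_tokens=0
# the request only asks for the prompt to be scored, so max_tokens is clamped
# to 1 and prompt_logprobs is populated from the logprobs setting.
#
#   request = CompletionRequest(model="my-model", prompt="Hi", echo=True,
#                               max_tokens=0, logprobs=5)
#   params = request.to_sampling_params()  # params.prompt_logprobs == 5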


class LogProbs(BaseModel):
    text_offset: List[int] = Field(default_factory=list)
    token_logprobs: List[Optional[float]] = Field(default_factory=list)
    tokens: List[str] = Field(default_factory=list)
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None
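
# The list fields are parallel, one entry per token. A hypothetical two-token
# example (token ids and logprob values are made up for illustration):
#
#   logprobs = LogProbs(
#       text_offset=[0, 5],
#       token_logprobs=[-0.1, -1.2],
#       tokens=["Hello", ","],
#       top_logprobs=[{15496: -0.1}, {11: -1.2}],
#   )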


class CompletionResponseChoice(BaseModel):
    index: int
    text: str
    logprobs: Optional[LogProbs] = None
    finish_reason: Optional[Literal["stop", "length"]] = None


class CompletionResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[CompletionResponseChoice]
    usage: UsageInfo


class CompletionResponseStreamChoice(BaseModel):
    index: int
    text: str
    logprobs: Optional[LogProbs] = None
    finish_reason: Optional[Literal["stop", "length"]] = None


class CompletionStreamResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[CompletionResponseStreamChoice]
    usage: Optional[UsageInfo] = Field(default=None)


class ChatMessage(BaseModel):
    role: str
    content: str


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatMessage
    finish_reason: Optional[Literal["stop", "length"]] = None


class ChatCompletionResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionResponseChoice]
    usage: UsageInfo


class DeltaMessage(BaseModel):
    role: Optional[str] = None
    content: Optional[str] = None


class ChatCompletionResponseStreamChoice(BaseModel):
    index: int
    delta: DeltaMessage
    finish_reason: Optional[Literal["stop", "length"]] = None


class ChatCompletionStreamResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
    object: str = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionResponseStreamChoice]
    usage: Optional[UsageInfo] = Field(default=None)
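

# Illustrative sketch (assumes the pydantic v1 .json() API; chunk contents
# are hypothetical): one delta of a streamed chat completion, serialized as
# a server-sent-events data line.
#
#   chunk = ChatCompletionStreamResponse(
#       model="my-model",
#       choices=[ChatCompletionResponseStreamChoice(
#           index=0, delta=DeltaMessage(content="Hel"))],
#   )
#   data = f"data: {chunk.json(exclude_unset=True)}\n\n"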