io_struct.py 10.3 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
"""
Copyright 2023-2024 SGLang Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

Lianmin Zheng's avatar
Lianmin Zheng committed
16
17
18
19
20
"""
The definition of objects transfered between different
processes (TokenizerManager, DetokenizerManager, Controller).
"""

Lianmin Zheng's avatar
Lianmin Zheng committed
21
22
23
24
import uuid
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

25
from sglang.srt.managers.schedule_batch import BaseFinishReason
26
from sglang.srt.sampling_params import SamplingParams
Lianmin Zheng's avatar
Lianmin Zheng committed
27
28
29
30


@dataclass
class GenerateReqInput:
    """The input of a generation request.

    Every field may hold either a single value or a batch (list) of values.
    ``post_init`` validates the request, broadcasts scalar fields to lists for
    batch requests, and derives ``is_single``, ``batch_size``, and (for batch
    requests) ``parallel_sample_num``.
    """

    # The input prompt. It can be a single prompt or a batch of prompts.
    text: Optional[Union[List[str], str]] = None
    # The token ids for text; one can either specify text or input_ids.
    input_ids: Optional[Union[List[List[int]], List[int]]] = None
    # The image input. It can be a file name, a url, or base64 encoded string.
    # See also python/sglang/srt/utils.py:load_image.
    image_data: Optional[Union[List[str], str]] = None
    # The sampling_params. See descriptions below.
    sampling_params: Optional[Union[List[Dict], Dict]] = None
    # The request id.
    rid: Optional[Union[List[str], str]] = None
    # Whether to return logprobs.
    return_logprob: Optional[Union[List[bool], bool]] = None
    # If return logprobs, the start location in the prompt for returning logprobs.
    logprob_start_len: Optional[Union[List[int], int]] = None
    # If return logprobs, the number of top logprobs to return at each position.
    top_logprobs_num: Optional[Union[List[int], int]] = None
    # Whether to detokenize tokens in text in the returned logprobs.
    return_text_in_logprobs: bool = False
    # Whether to stream output.
    stream: bool = False

    def post_init(self):
        """Validate the request and normalize its fields.

        Raises:
            ValueError: if neither or both of ``text`` and ``input_ids`` are
                provided, if a non-list ``rid`` is supplied for a batch
                request, or if per-sample ``n`` values disagree.
        """
        if (self.text is None and self.input_ids is None) or (
            self.text is not None and self.input_ids is not None
        ):
            raise ValueError("Either text or input_ids should be provided.")

        # A request with parallel sampling (n > 1) is handled through the
        # batch path even when only one prompt is supplied.
        if (
            isinstance(self.sampling_params, dict)
            and self.sampling_params.get("n", 1) != 1
        ):
            is_single = False
        else:
            if self.text is not None:
                is_single = isinstance(self.text, str)
            else:
                is_single = isinstance(self.input_ids[0], int)
        self.is_single = is_single

        if is_single:
            self._normalize_single()
        else:
            self._normalize_batch()

    def _normalize_single(self):
        """Fill in defaults for a single (non-batch) request."""
        if self.sampling_params is None:
            self.sampling_params = {}
        if self.rid is None:
            self.rid = uuid.uuid4().hex
        if self.return_logprob is None:
            self.return_logprob = False
        if self.logprob_start_len is None:
            self.logprob_start_len = -1
        if self.top_logprobs_num is None:
            self.top_logprobs_num = 0

    def _resolve_parallel_sample_num(self):
        """Return the parallel sampling factor ``n``, requiring it to be uniform
        across all per-sample sampling params when ``n > 1``."""
        if isinstance(self.sampling_params, dict):
            return self.sampling_params.get("n", 1)
        if isinstance(self.sampling_params, list):
            nums = [sp.get("n", 1) for sp in self.sampling_params]
            parallel_sample_num = max(nums)
            if parallel_sample_num > 1 and any(
                n != parallel_sample_num for n in nums
            ):
                # TODO cope with the case that the parallel_sample_num is
                # different for different samples
                raise ValueError(
                    "The parallel_sample_num should be the same for all samples in sample params."
                )
            return parallel_sample_num
        return 1

    def _normalize_batch(self):
        """Broadcast scalar fields to per-request lists and compute batch
        bookkeeping (``batch_size``, ``parallel_sample_num``)."""
        parallel_sample_num = self._resolve_parallel_sample_num()
        self.parallel_sample_num = parallel_sample_num

        if parallel_sample_num != 1:
            # parallel sampling +1 represents the original prefill stage
            num = parallel_sample_num + 1
            if isinstance(self.text, list):
                # support batch operation
                self.batch_size = len(self.text)
                num = num * len(self.text)
            elif isinstance(self.input_ids, list) and isinstance(
                self.input_ids[0], list
            ):
                self.batch_size = len(self.input_ids)
                num = num * len(self.input_ids)
            else:
                self.batch_size = 1
        else:
            # support select operation
            num = len(self.text) if self.text is not None else len(self.input_ids)
            self.batch_size = num

        if self.image_data is None:
            self.image_data = [None] * num
        elif not isinstance(self.image_data, list):
            self.image_data = [self.image_data] * num

        if self.sampling_params is None:
            # Use distinct dicts per request: `[{}] * num` would alias one
            # shared dict, so a later mutation of sampling_params[i] would
            # silently leak into every other entry.
            self.sampling_params = [{} for _ in range(num)]
        elif not isinstance(self.sampling_params, list):
            self.sampling_params = [self.sampling_params] * num

        if self.rid is None:
            self.rid = [uuid.uuid4().hex for _ in range(num)]
        else:
            if not isinstance(self.rid, list):
                raise ValueError("The rid should be a list.")

        if self.return_logprob is None:
            self.return_logprob = [False] * num
        elif not isinstance(self.return_logprob, list):
            self.return_logprob = [self.return_logprob] * num

        if self.logprob_start_len is None:
            self.logprob_start_len = [-1] * num
        elif not isinstance(self.logprob_start_len, list):
            self.logprob_start_len = [self.logprob_start_len] * num

        if self.top_logprobs_num is None:
            self.top_logprobs_num = [0] * num
        elif not isinstance(self.top_logprobs_num, list):
            self.top_logprobs_num = [self.top_logprobs_num] * num

Lianmin Zheng's avatar
Lianmin Zheng committed
153
154
155

@dataclass
class TokenizedGenerateReqInput:
    """The tokenized form of a GenerateReqInput, for a single request."""

    # The request id
    rid: str
    # The input text
    input_text: str
    # The input token ids
    input_ids: List[int]
    # The pixel values for input images
    pixel_values: List[float]
    # The hash of input images
    image_hash: int
    # The image size
    image_size: List[int]
    # The sampling parameters
    sampling_params: SamplingParams
    # Whether to return the logprobs
    return_logprob: bool
    # If return logprobs, the start location in the prompt for returning logprobs.
    logprob_start_len: int
    # If return logprobs, the number of top logprobs to return at each position.
    top_logprobs_num: int
    # Whether to stream output
    stream: bool


180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
@dataclass
class EmbeddingReqInput:
    """The input of an embedding request.

    ``text``/``input_ids`` may each be a single value or a batch (list);
    ``post_init`` validates the request, fills defaults, and derives
    ``is_single`` and (for batches) ``batch_size``. Embedding requests always
    decode exactly one token, hence the forced ``max_new_tokens = 1``.
    """

    # The input prompt. It can be a single prompt or a batch of prompts.
    text: Optional[Union[List[str], str]] = None
    # The token ids for text; one can either specify text or input_ids.
    input_ids: Optional[Union[List[List[int]], List[int]]] = None
    # The request id.
    rid: Optional[Union[List[str], str]] = None
    # Dummy sampling params for compatibility
    sampling_params: Optional[Union[List[Dict], Dict]] = None

    def post_init(self):
        """Validate the request and normalize its fields.

        Raises:
            ValueError: if neither or both of ``text`` and ``input_ids`` are
                provided, or if a non-list ``rid`` is supplied for a batch.
        """
        if (self.text is None and self.input_ids is None) or (
            self.text is not None and self.input_ids is not None
        ):
            raise ValueError("Either text or input_ids should be provided.")

        if self.text is not None:
            is_single = isinstance(self.text, str)
        else:
            is_single = isinstance(self.input_ids[0], int)
        self.is_single = is_single

        if is_single:
            if self.rid is None:
                self.rid = uuid.uuid4().hex
            if self.sampling_params is None:
                self.sampling_params = {}
            self.sampling_params["max_new_tokens"] = 1
        else:
            # support select operation
            self.batch_size = (
                len(self.text) if self.text is not None else len(self.input_ids)
            )
            if self.rid is None:
                self.rid = [uuid.uuid4().hex for _ in range(self.batch_size)]
            else:
                if not isinstance(self.rid, list):
                    raise ValueError("The rid should be a list.")
            if self.sampling_params is None:
                # Use distinct dicts per request: `[{}] * n` would alias one
                # shared dict, so any divergent per-request mutation would
                # corrupt every entry.
                self.sampling_params = [{} for _ in range(self.batch_size)]
            for i in range(self.batch_size):
                self.sampling_params[i]["max_new_tokens"] = 1
223
224
225
226


@dataclass
class TokenizedEmbeddingReqInput:
    """The tokenized form of an EmbeddingReqInput, for a single request."""

    # The request id
    rid: str
    # The input text
    input_text: str
    # The input token ids
    input_ids: List[int]
    # Dummy sampling params for compatibility
    sampling_params: SamplingParams


Lianmin Zheng's avatar
Lianmin Zheng committed
237
238
@dataclass
class BatchTokenIDOut:
    """A batch of per-request token-id outputs, not yet detokenized.

    All lists are parallel: index i of every field refers to the same request.
    """

    # The request id
    rids: List[str]
    # The version id to sync decode status with in detokenizer_manager
    vids: List[int]
    # The text decoded so far, per request
    decoded_texts: List[str]
    # Token ids pending detokenization, and the read offset into them, per request
    decode_ids: List[int]
    read_offsets: List[int]
    # Per-request detokenization options
    skip_special_tokens: List[bool]
    spaces_between_special_tokens: List[bool]
    # Per-request meta info dicts
    meta_info: List[Dict]
    # Per-request finish reason
    finished_reason: List[BaseFinishReason]
Lianmin Zheng's avatar
Lianmin Zheng committed
250

Liangsheng Yin's avatar
Liangsheng Yin committed
251

Lianmin Zheng's avatar
Lianmin Zheng committed
252
253
@dataclass
class BatchStrOut:
    """A batch of detokenized string outputs.

    All lists are parallel: index i of every field refers to the same request.
    """

    # The request id
    rids: List[str]
    # The output decoded strings
    output_strs: List[str]
    # The meta info
    meta_info: List[Dict]
    # The finish reason
    finished_reason: List[BaseFinishReason]
Liangsheng Yin's avatar
Liangsheng Yin committed
262
263


264
265
@dataclass
class BatchEmbeddingOut:
    """A batch of embedding outputs.

    All lists are parallel: index i of every field refers to the same request.
    """

    # The request id
    rids: List[str]
    # The output embedding
    embeddings: List[List[float]]
    # The meta info
    meta_info: List[Dict]
    # The finish reason
    finished_reason: List[BaseFinishReason]


Liangsheng Yin's avatar
Liangsheng Yin committed
276
277
278
@dataclass
class FlushCacheReq:
    """A signal asking the server to flush its cache; carries no payload."""

    pass
Cody Yu's avatar
Cody Yu committed
279

280

281
282
283
284
285
286
287
288
289
290
291
292
293
294
@dataclass
class UpdateWeightReqInput:
    """A request to update the model weights from a new checkpoint."""

    # The model path with the new weights
    model_path: str
    # The format to load the weights
    load_format: Optional[str] = None


@dataclass
class UpdateWeightReqOutput:
    """The result of a weight-update request."""

    # Whether the update succeeded
    success: bool
    # A human-readable status message
    message: str


295
296
@dataclass
class AbortReq:
    """A request to abort the request with the given id."""

    # The request id
    rid: str