sequence.py 12.6 KB
Newer Older
1
"""Sequence and its related classes."""
2
import copy
Woosuk Kwon's avatar
Woosuk Kwon committed
3
import enum
Zhuohan Li's avatar
Zhuohan Li committed
4
from typing import Dict, List, Optional, Union
Woosuk Kwon's avatar
Woosuk Kwon committed
5

Woosuk Kwon's avatar
Woosuk Kwon committed
6
7
from vllm.block import LogicalTokenBlock
from vllm.sampling_params import SamplingParams
Woosuk Kwon's avatar
Woosuk Kwon committed
8
9
10


class SequenceStatus(enum.Enum):
    """Lifecycle status of a sequence."""
    WAITING = enum.auto()
    RUNNING = enum.auto()
    SWAPPED = enum.auto()
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    @staticmethod
    def is_finished(status: "SequenceStatus") -> bool:
        """Whether `status` is one of the terminal FINISHED_* states."""
        return status in (
            SequenceStatus.FINISHED_STOPPED,
            SequenceStatus.FINISHED_LENGTH_CAPPED,
            SequenceStatus.FINISHED_ABORTED,
            SequenceStatus.FINISHED_IGNORED,
        )

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Union[str, None]:
        """Map a terminal status to its OpenAI-style finish reason.

        Returns None for statuses that are not finished.
        """
        if status == SequenceStatus.FINISHED_STOPPED:
            return "stop"
        if status in (SequenceStatus.FINISHED_LENGTH_CAPPED,
                      SequenceStatus.FINISHED_IGNORED):
            # Ignored sequences are those whose prompts are already longer
            # than the model's length cap, so their finish reason is also
            # "length", matching the OpenAI API.
            return "length"
        if status == SequenceStatus.FINISHED_ABORTED:
            return "abort"
        return None
Woosuk Kwon's avatar
Woosuk Kwon committed
45

46

47
class SequenceData:
    """Token-level state of a single sequence.

    Args:
        prompt_token_ids: The token IDs of the prompt.

    Attributes:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs generated so far.
        cumulative_logprob: Running sum of the output tokens' log
            probabilities.
    """

    def __init__(
        self,
        prompt_token_ids: List[int],
    ) -> None:
        self.prompt_token_ids = prompt_token_ids
        self.output_token_ids: List[int] = []
        self.cumulative_logprob = 0.0

    def append_token_id(self, token_id: int, logprob: float) -> None:
        """Record one generated token and accumulate its log probability."""
        self.output_token_ids.append(token_id)
        self.cumulative_logprob += logprob

    def get_len(self) -> int:
        """Total length: prompt plus generated output."""
        return len(self.prompt_token_ids) + len(self.output_token_ids)

    def get_prompt_len(self) -> int:
        return len(self.prompt_token_ids)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)

    def get_token_ids(self) -> List[int]:
        """All token IDs as a new list, prompt tokens first."""
        return [*self.prompt_token_ids, *self.output_token_ids]

    def get_last_token_id(self) -> int:
        """The most recent token ID: the last output token, or the last
        prompt token when nothing has been generated yet."""
        if self.output_token_ids:
            return self.output_token_ids[-1]
        return self.prompt_token_ids[-1]

    def __repr__(self) -> str:
        return (f"SequenceData("
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"output_token_ids={self.output_token_ids}, "
                f"cumulative_logprob={self.cumulative_logprob})")
94
95


Woosuk Kwon's avatar
Woosuk Kwon committed
96
class Sequence:
    """Stores the data, status, and block information of a sequence.

    Args:
        seq_id: The ID of the sequence.
        prompt: The prompt of the sequence.
        prompt_token_ids: The token IDs of the prompt.
        block_size: The block size of the sequence. Should be the same as the
            block size used by the block manager and cache engine.
    """

    def __init__(
        self,
        seq_id: int,
        prompt: str,
        prompt_token_ids: List[int],
        block_size: int,
    ) -> None:
        self.seq_id = seq_id
        self.prompt = prompt
        self.block_size = block_size

        self.data = SequenceData(prompt_token_ids)
        self.output_logprobs: List[Dict[int, float]] = []
        self.output_text = ""

        # Lay the prompt tokens out into fixed-size logical blocks up front.
        self.logical_token_blocks: List[LogicalTokenBlock] = []
        self._append_tokens_to_blocks(prompt_token_ids)
        self.status = SequenceStatus.WAITING

        # State for incremental detokenization.
        self.prefix_offset = 0
        self.read_offset = 0
        # Detokenized pieces for prompt + output tokens (filled lazily).
        self.tokens: Optional[List[str]] = None

    def _append_logical_block(self) -> None:
        """Grow the sequence by one empty logical block."""
        self.logical_token_blocks.append(
            LogicalTokenBlock(
                block_number=len(self.logical_token_blocks),
                block_size=self.block_size,
            ))

    def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
        """Copy `token_ids` into the logical blocks, growing them as needed."""
        pos = 0
        total = len(token_ids)
        while pos < total:
            # Ensure the tail block exists and has at least one free slot.
            if not self.logical_token_blocks:
                self._append_logical_block()
            if self.logical_token_blocks[-1].is_full():
                self._append_logical_block()
            tail = self.logical_token_blocks[-1]

            free = tail.get_num_empty_slots()
            tail.append_tokens(token_ids[pos:pos + free])
            pos += free

    def append_token_id(
        self,
        token_id: int,
        logprobs: Dict[int, float],
    ) -> None:
        """Append one generated token (with its sampled logprobs) to the
        sequence."""
        assert token_id in logprobs
        self._append_tokens_to_blocks([token_id])
        self.output_logprobs.append(logprobs)
        self.data.append_token_id(token_id, logprobs[token_id])

    def get_len(self) -> int:
        return self.data.get_len()

    def get_prompt_len(self) -> int:
        return self.data.get_prompt_len()

    def get_output_len(self) -> int:
        return self.data.get_output_len()

    def get_token_ids(self) -> List[int]:
        return self.data.get_token_ids()

    def get_last_token_id(self) -> int:
        return self.data.get_last_token_id()

    def get_output_token_ids(self) -> List[int]:
        return self.data.output_token_ids

    def get_cumulative_logprob(self) -> float:
        return self.data.cumulative_logprob

    def get_beam_search_score(self,
                              length_penalty: float = 0.0,
                              seq_len: Optional[int] = None,
                              eos_token_id: Optional[int] = None) -> float:
        """Calculate the beam search score with length penalty.

        Adapted from

        https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
        """
        if seq_len is None:
            seq_len = self.get_len()
            # Note: HF implementation does not count the EOS token
            # towards the length, we align with that here for testing.
            if (eos_token_id is not None
                    and self.get_last_token_id() == eos_token_id):
                seq_len -= 1
        return self.get_cumulative_logprob() / (seq_len**length_penalty)

    def is_finished(self) -> bool:
        return SequenceStatus.is_finished(self.status)

    def fork(self, new_seq_id: int) -> "Sequence":
        """Return a deep copy of this sequence under a new sequence ID."""
        clone = copy.deepcopy(self)
        clone.seq_id = new_seq_id
        return clone

    def __repr__(self) -> str:
        return (f"Sequence(seq_id={self.seq_id}, "
                f"status={self.status.name}, "
                f"num_blocks={len(self.logical_token_blocks)})")
Woosuk Kwon's avatar
Woosuk Kwon committed
218

Woosuk Kwon's avatar
Woosuk Kwon committed
219
220

class SequenceGroup:
    """A group of sequences generated from the same prompt.

    Args:
        request_id: The ID of the request.
        seqs: The list of sequences.
        sampling_params: The sampling parameters used to generate the outputs.
        arrival_time: The arrival time of the request.
    """

    def __init__(
        self,
        request_id: str,
        seqs: List[Sequence],
        sampling_params: SamplingParams,
        arrival_time: float,
    ) -> None:
        self.request_id = request_id
        # Index the sequences by ID for O(1) find/add/remove.
        self.seqs_dict = {seq.seq_id: seq for seq in seqs}
        self.sampling_params = sampling_params
        self.arrival_time = arrival_time

    def get_max_num_running_seqs(self) -> int:
        """The maximum number of sequences running in parallel in the remaining
        lifetime of the request."""
        best_of = self.sampling_params.best_of
        if self.sampling_params.use_beam_search:
            # Beam search always keeps `best_of` candidate beams alive.
            return best_of
        if best_of > self.num_seqs():
            # Prompt stage: the group is not yet filled up and only has one
            # sequence running, but `best_of` sequences will run once
            # generation starts.
            return best_of
        # Sampling stage: only the still-unfinished sequences keep running.
        return self.num_unfinished_seqs()

    def get_seqs(
        self,
        status: Optional[SequenceStatus] = None,
    ) -> List[Sequence]:
        """All sequences, optionally filtered to a single status."""
        seqs = self.seqs_dict.values()
        if status is None:
            return list(seqs)
        return [seq for seq in seqs if seq.status == status]

    def get_unfinished_seqs(self) -> List[Sequence]:
        return [
            seq for seq in self.seqs_dict.values() if not seq.is_finished()
        ]

    def get_finished_seqs(self) -> List[Sequence]:
        return [seq for seq in self.seqs_dict.values() if seq.is_finished()]

    def num_seqs(self, status: Optional[SequenceStatus] = None) -> int:
        return len(self.get_seqs(status))

    def num_unfinished_seqs(self) -> int:
        return len(self.get_unfinished_seqs())

    def num_finished_seqs(self) -> int:
        return len(self.get_finished_seqs())

    def find(self, seq_id: int) -> Sequence:
        """Return the sequence with `seq_id`, raising ValueError if absent."""
        try:
            return self.seqs_dict[seq_id]
        except KeyError:
            raise ValueError(f"Sequence {seq_id} not found.")

    def add(self, seq: Sequence) -> None:
        """Add a new sequence, raising ValueError on a duplicate ID."""
        if seq.seq_id in self.seqs_dict:
            raise ValueError(f"Sequence {seq.seq_id} already exists.")
        self.seqs_dict[seq.seq_id] = seq

    def remove(self, seq_id: int) -> None:
        """Remove a sequence by ID, raising ValueError if absent."""
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        del self.seqs_dict[seq_id]

    def is_finished(self) -> bool:
        """True only when every sequence in the group has finished."""
        return all(seq.is_finished() for seq in self.get_seqs())

    def __repr__(self) -> str:
        return (f"SequenceGroup(request_id={self.request_id}, "
                f"sampling_params={self.sampling_params}, "
                f"num_seqs={len(self.seqs_dict)})")
309
310


311
class SequenceGroupMetadata:
    """Metadata for a sequence group. Used to create `InputMetadata`.

    Args:
        request_id: The ID of the request.
        is_prompt: Whether the request is at prompt stage.
        seq_data: The sequence data. (Seq id -> sequence data)
        sampling_params: The sampling parameters used to generate the outputs.
        block_tables: The block tables. (Seq id -> list of physical block
            numbers)
    """

    def __init__(
        self,
        request_id: str,
        is_prompt: bool,
        seq_data: Dict[int, SequenceData],
        sampling_params: SamplingParams,
        block_tables: Dict[int, List[int]],
    ) -> None:
        # Pure data carrier: all arguments are stored as-is, no validation.
        self.request_id = request_id
        self.is_prompt = is_prompt
        self.seq_data = seq_data
        self.sampling_params = sampling_params
        self.block_tables = block_tables


class SequenceOutputs:
    """The model output associated with a sequence.

    Args:
        parent_seq_id: The ID of the parent sequence (for forking in beam
            search).
        output_token: The output token ID.
        logprobs: The logprobs of the output token.
            (Token id -> logP(x_i+1 | x_0, ..., x_i))
    """

    def __init__(
        self,
        parent_seq_id: int,
        output_token: int,
        logprobs: Dict[int, float],
    ) -> None:
        self.parent_seq_id = parent_seq_id
        self.output_token = output_token
        self.logprobs = logprobs

    def __repr__(self) -> str:
        return (f"SequenceOutputs(parent_seq_id={self.parent_seq_id}, "
                f"output_token={self.output_token}, "
                f"logprobs={self.logprobs})")

    def __eq__(self, other: object) -> bool:
        # NOTE(review): raising here (instead of returning NotImplemented)
        # makes comparisons against non-SequenceOutputs objects blow up;
        # kept as-is to preserve the existing contract.
        if not isinstance(other, SequenceOutputs):
            raise NotImplementedError()
        return ((self.parent_seq_id, self.output_token, self.logprobs) ==
                (other.parent_seq_id, other.output_token, other.logprobs))
371
372
373
374
375


# For each sequence group, the sampler generates a list of SequenceOutputs
# objects, each of which contains one candidate for the group's next token.
SamplerOutput = List[List[SequenceOutputs]]