"""Sequence and its related classes."""
import copy
import enum
from typing import Dict, List, Optional, Union

from vllm.block import LogicalTokenBlock
from vllm.sampling_params import SamplingParams


class SequenceStatus(enum.Enum):
    """Status of a sequence."""
    WAITING = enum.auto()
    RUNNING = enum.auto()
    SWAPPED = enum.auto()
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    @staticmethod
    def is_finished(status: "SequenceStatus") -> bool:
        return status in [
            SequenceStatus.FINISHED_STOPPED,
            SequenceStatus.FINISHED_LENGTH_CAPPED,
            SequenceStatus.FINISHED_ABORTED,
            SequenceStatus.FINISHED_IGNORED,
        ]

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Union[str, None]:
        if status == SequenceStatus.FINISHED_STOPPED:
            finish_reason = "stop"
        elif status == SequenceStatus.FINISHED_LENGTH_CAPPED:
            finish_reason = "length"
        elif status == SequenceStatus.FINISHED_ABORTED:
            finish_reason = "abort"
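        # Ignored sequences (e.g. those whose prompt length exceeds the
        # model's context length) are reported as finished due to length.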
        elif status == SequenceStatus.FINISHED_IGNORED:
            finish_reason = "length"
        else:
            finish_reason = None
        return finish_reason


class SequenceData:
    """Data associated with a sequence.


    Args:
        prompt_token_ids: The token IDs of the prompt.

    Attributes:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs of the output.
        cumulative_logprob: The cumulative log probability of the output.
    """

    def __init__(
        self,
        prompt_token_ids: List[int],
    ) -> None:
        self.prompt_token_ids = prompt_token_ids
        self.output_token_ids: List[int] = []
        self.cumulative_logprob = 0.0

    def append_token_id(self, token_id: int, logprob: float) -> None:
        self.output_token_ids.append(token_id)
        self.cumulative_logprob += logprob

    def get_len(self) -> int:
        return len(self.output_token_ids) + len(self.prompt_token_ids)

    def get_prompt_len(self) -> int:
        return len(self.prompt_token_ids)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)

    def get_token_ids(self) -> List[int]:
        return self.prompt_token_ids + self.output_token_ids

    def get_last_token_id(self) -> int:
        if not self.output_token_ids:
            return self.prompt_token_ids[-1]
        return self.output_token_ids[-1]

    def __repr__(self) -> str:
        return (f"SequenceData("
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"output_token_ids={self.output_token_ids}, "
                f"cumulative_logprob={self.cumulative_logprob})")


class Sequence:
    """Stores the data, status, and block information of a sequence.

    Args:
        seq_id: The ID of the sequence.
        prompt: The prompt of the sequence.
        prompt_token_ids: The token IDs of the prompt.
        block_size: The block size of the sequence. Should be the same as the
            block size used by the block manager and cache engine.
    """

    def __init__(
        self,
        seq_id: int,
        prompt: str,
        prompt_token_ids: List[int],
        block_size: int,
    ) -> None:
        self.seq_id = seq_id
        self.prompt = prompt
        self.block_size = block_size

        self.data = SequenceData(prompt_token_ids)
        self.output_logprobs: List[Dict[int, float]] = []
        self.output_text = ""

        self.logical_token_blocks: List[LogicalTokenBlock] = []
        # Initialize the logical token blocks with the prompt token ids.
        self._append_tokens_to_blocks(prompt_token_ids)
        self.status = SequenceStatus.WAITING

        # Used for incremental detokenization
        self.prefix_offset = 0
        self.read_offset = 0
        # Input + output tokens
        self.tokens: Optional[List[str]] = None

    def _append_logical_block(self) -> None:
        block = LogicalTokenBlock(
            block_number=len(self.logical_token_blocks),
            block_size=self.block_size,
        )
        self.logical_token_blocks.append(block)

    def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
        cursor = 0
        while cursor < len(token_ids):
            if not self.logical_token_blocks:
                self._append_logical_block()

            last_block = self.logical_token_blocks[-1]
            if last_block.is_full():
                self._append_logical_block()
                last_block = self.logical_token_blocks[-1]

            num_empty_slots = last_block.get_num_empty_slots()
            last_block.append_tokens(token_ids[cursor:cursor +
                                               num_empty_slots])
            cursor += num_empty_slots
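        # e.g. with block_size=4, a 6-token prompt fills the first block
        # with 4 tokens and places the remaining 2 in a second block.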

    def append_token_id(
        self,
        token_id: int,
        logprobs: Dict[int, float],
    ) -> None:
        assert token_id in logprobs
        self._append_tokens_to_blocks([token_id])
        self.output_logprobs.append(logprobs)
        self.data.append_token_id(token_id, logprobs[token_id])

    def get_len(self) -> int:
        return self.data.get_len()

    def get_prompt_len(self) -> int:
        return self.data.get_prompt_len()

    def get_output_len(self) -> int:
        return self.data.get_output_len()

    def get_token_ids(self) -> List[int]:
        return self.data.get_token_ids()

    def get_last_token_id(self) -> int:
        return self.data.get_last_token_id()

    def get_output_token_ids(self) -> List[int]:
        return self.data.output_token_ids

    def get_cumulative_logprob(self) -> float:
        return self.data.cumulative_logprob

    def get_beam_search_score(self,
                              length_penalty: float = 0.0,
                              seq_len: Optional[int] = None,
                              eos_token_id: Optional[int] = None) -> float:
        """Calculate the beam search score with length penalty.

        Adapted from

        https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
        """
        if seq_len is None:
            seq_len = self.get_len()
            # Note: HF implementation does not count the EOS token
            # towards the length, we align with that here for testing.
            if (eos_token_id is not None
                    and self.get_last_token_id() == eos_token_id):
                seq_len -= 1
        return self.get_cumulative_logprob() / (seq_len**length_penalty)

    def is_finished(self) -> bool:
        return SequenceStatus.is_finished(self.status)

    def fork(self, new_seq_id: int) -> "Sequence":
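        # Deep-copy so the forked sequence can diverge (e.g. during beam
        # search) without mutating this sequence's data or block bookkeeping.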
        new_seq = copy.deepcopy(self)
        new_seq.seq_id = new_seq_id
        return new_seq

    def __repr__(self) -> str:
        return (f"Sequence(seq_id={self.seq_id}, "
                f"status={self.status.name}, "
                f"num_blocks={len(self.logical_token_blocks)})")


class SequenceGroup:
    """A group of sequences that are generated from the same prompt.

    Args:
        request_id: The ID of the request.
        seqs: The list of sequences.
        sampling_params: The sampling parameters used to generate the outputs.
        arrival_time: The arrival time of the request.
    """

    def __init__(
        self,
        request_id: str,
        seqs: List[Sequence],
        sampling_params: SamplingParams,
        arrival_time: float,
    ) -> None:
        self.request_id = request_id
        self.seqs_dict = {seq.seq_id: seq for seq in seqs}
        self.sampling_params = sampling_params
        self.arrival_time = arrival_time

    def get_max_num_running_seqs(self) -> int:
        """The maximum number of sequences running in parallel in the remaining
        lifetime of the request."""
        if self.sampling_params.use_beam_search:
            # For beam search, at most `best_of` beam candidates will be
            # running at any point in the future.
            return self.sampling_params.best_of
        else:
            if self.sampling_params.best_of > self.num_seqs():
                # At the prompt stage, the sequence group is not yet filled
                # up and only has one sequence running. However, in the
                # generation stage, we will have `best_of` sequences running.
                return self.sampling_params.best_of
            # At the sampling stage, return the number of sequences
            # that are not yet finished.
            return self.num_unfinished_seqs()

    def get_seqs(
        self,
        status: Optional[SequenceStatus] = None,
    ) -> List[Sequence]:
        if status is None:
            return list(self.seqs_dict.values())
        else:
            return [
                seq for seq in self.seqs_dict.values() if seq.status == status
            ]

    def get_unfinished_seqs(self) -> List[Sequence]:
        return [
            seq for seq in self.seqs_dict.values() if not seq.is_finished()
        ]

    def get_finished_seqs(self) -> List[Sequence]:
        return [seq for seq in self.seqs_dict.values() if seq.is_finished()]

    def num_seqs(self, status: Optional[SequenceStatus] = None) -> int:
        return len(self.get_seqs(status))

    def num_unfinished_seqs(self) -> int:
        return len(self.get_unfinished_seqs())

    def num_finished_seqs(self) -> int:
        return len(self.get_finished_seqs())

    def find(self, seq_id: int) -> Sequence:
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        return self.seqs_dict[seq_id]

    def add(self, seq: Sequence) -> None:
        if seq.seq_id in self.seqs_dict:
            raise ValueError(f"Sequence {seq.seq_id} already exists.")
        self.seqs_dict[seq.seq_id] = seq

    def remove(self, seq_id: int) -> None:
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        del self.seqs_dict[seq_id]

    def is_finished(self) -> bool:
        return all(seq.is_finished() for seq in self.get_seqs())

    def __repr__(self) -> str:
        return (f"SequenceGroup(request_id={self.request_id}, "
                f"sampling_params={self.sampling_params}, "
                f"num_seqs={len(self.seqs_dict)})")


class SequenceGroupMetadata:
    """Metadata for a sequence group. Used to create `InputMetadata`.


    Args:
        request_id: The ID of the request.
        is_prompt: Whether the request is at prompt stage.
        seq_data: The sequence data. (Seq id -> sequence data)
        sampling_params: The sampling parameters used to generate the outputs.
        block_tables: The block tables. (Seq id -> list of physical block
            numbers)
    """

    def __init__(
        self,
        request_id: str,
        is_prompt: bool,
        seq_data: Dict[int, SequenceData],
        sampling_params: SamplingParams,
        block_tables: Dict[int, List[int]],
    ) -> None:
        self.request_id = request_id
        self.is_prompt = is_prompt
        self.seq_data = seq_data
        self.sampling_params = sampling_params
        self.block_tables = block_tables
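        # e.g. {0: [3, 7]} maps the sequence with id 0 to physical blocks
        # 3 and 7 in the KV cache (illustrative values).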


class SequenceOutputs:
    """The model output associated with a sequence.

    Args:
        parent_seq_id: The ID of the parent sequence (for forking in beam
            search).
        output_token: The output token ID.
        logprobs: The logprobs of the output token.
            (Token id -> logP(x_i+1 | x_0, ..., x_i))
    """

    def __init__(
        self,
        parent_seq_id: int,
        output_token: int,
        logprobs: Dict[int, float],
    ) -> None:
        self.parent_seq_id = parent_seq_id
        self.output_token = output_token
        self.logprobs = logprobs

    def __repr__(self) -> str:
        return (f"SequenceOutputs(parent_seq_id={self.parent_seq_id}, "
                f"output_token={self.output_token}), "
                f"logprobs={self.logprobs}")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SequenceOutputs):
            return NotImplemented
        return (self.parent_seq_id == other.parent_seq_id
                and self.output_token == other.output_token
                and self.logprobs == other.logprobs)


# For each sequence group, we generate a list of SequenceOutputs objects,
# each of which contains one candidate for the next token.
SamplerOutput = List[List[SequenceOutputs]]
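

# A minimal usage sketch of the classes above, assuming SamplingParams() can
# be constructed with its defaults; the token ids and logprobs below are
# made-up illustrative values.
if __name__ == "__main__":
    import time

    # Build a sequence from a 3-token prompt and append one sampled token.
    seq = Sequence(seq_id=0,
                   prompt="Hello",
                   prompt_token_ids=[101, 102, 103],
                   block_size=16)
    seq.append_token_id(token_id=7, logprobs={7: -0.1, 8: -2.3})
    assert seq.get_len() == 4
    assert seq.get_last_token_id() == 7

    # Group the sequence under a single request.
    group = SequenceGroup(request_id="0",
                          seqs=[seq],
                          sampling_params=SamplingParams(),
                          arrival_time=time.time())
    assert not group.is_finished()
    print(seq)
    print(group)

    # One candidate next token for the sequence, as emitted by the sampler.
    out = SequenceOutputs(parent_seq_id=0, output_token=7, logprobs={7: -0.1})
    print(out)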