sequence.py 11.9 KB
Newer Older
1
"""Sequence and its related classes."""
2
import copy
Woosuk Kwon's avatar
Woosuk Kwon committed
3
import enum
Zhuohan Li's avatar
Zhuohan Li committed
4
from typing import Dict, List, Optional, Union
Woosuk Kwon's avatar
Woosuk Kwon committed
5

Woosuk Kwon's avatar
Woosuk Kwon committed
6
7
from vllm.block import LogicalTokenBlock
from vllm.sampling_params import SamplingParams
Woosuk Kwon's avatar
Woosuk Kwon committed
8
9
10


class SequenceStatus(enum.Enum):
    """Status of a sequence."""
    WAITING = enum.auto()
    RUNNING = enum.auto()
    SWAPPED = enum.auto()
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    @staticmethod
    def is_finished(status: "SequenceStatus") -> bool:
        """Return True if `status` is one of the FINISHED_* states."""
        # NOTE: kept inside the method on purpose — a class-level container
        # assignment would be turned into an enum member.
        return status in (
            SequenceStatus.FINISHED_STOPPED,
            SequenceStatus.FINISHED_LENGTH_CAPPED,
            SequenceStatus.FINISHED_ABORTED,
            SequenceStatus.FINISHED_IGNORED,
        )

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Union[str, None]:
        """Map a finished status to its API-facing finish-reason string.

        Returns None for statuses that are not finished. FINISHED_IGNORED
        deliberately reports "length", matching FINISHED_LENGTH_CAPPED.
        """
        if status == SequenceStatus.FINISHED_STOPPED:
            return "stop"
        if status in (SequenceStatus.FINISHED_LENGTH_CAPPED,
                      SequenceStatus.FINISHED_IGNORED):
            return "length"
        if status == SequenceStatus.FINISHED_ABORTED:
            return "abort"
        return None
Woosuk Kwon's avatar
Woosuk Kwon committed
42

43

44
class SequenceData:
    """Data associated with a sequence.

    Args:
        prompt_token_ids: The token IDs of the prompt.

    Attributes:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs generated so far.
        cumulative_logprob: Running sum of the log probabilities of the
            generated tokens.
    """

    def __init__(
        self,
        prompt_token_ids: List[int],
    ) -> None:
        self.prompt_token_ids = prompt_token_ids
        self.output_token_ids: List[int] = []
        self.cumulative_logprob = 0.0

    def append_token_id(self, token_id: int, logprob: float) -> None:
        """Record one generated token and accumulate its logprob."""
        self.output_token_ids.append(token_id)
        self.cumulative_logprob += logprob

    def get_len(self) -> int:
        """Total length: prompt plus generated output."""
        return len(self.prompt_token_ids) + len(self.output_token_ids)

    def get_prompt_len(self) -> int:
        return len(self.prompt_token_ids)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)

    def get_token_ids(self) -> List[int]:
        """Prompt token IDs followed by output token IDs, as a new list."""
        return self.prompt_token_ids + self.output_token_ids

    def get_last_token_id(self) -> int:
        """The most recent token ID; falls back to the prompt's last token
        when nothing has been generated yet."""
        if self.output_token_ids:
            return self.output_token_ids[-1]
        return self.prompt_token_ids[-1]

    def __repr__(self) -> str:
        return (f"SequenceData("
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"output_token_ids={self.output_token_ids}, "
                f"cumulative_logprob={self.cumulative_logprob})")
91
92


Woosuk Kwon's avatar
Woosuk Kwon committed
93
class Sequence:
    """Stores the data, status, and block information of a sequence.

    Args:
        seq_id: The ID of the sequence.
        prompt: The prompt of the sequence.
        prompt_token_ids: The token IDs of the prompt.
        block_size: The block size of the sequence. Should be the same as the
            block size used by the block manager and cache engine.
    """

    def __init__(
        self,
        seq_id: int,
        prompt: str,
        prompt_token_ids: List[int],
        block_size: int,
    ) -> None:
        self.seq_id = seq_id
        self.prompt = prompt
        self.block_size = block_size

        self.data = SequenceData(prompt_token_ids)
        self.output_logprobs: List[Dict[int, float]] = []
        self.output_tokens: List[str] = []
        self.output_text = ""

        self.logical_token_blocks: List[LogicalTokenBlock] = []
        # Lay the prompt tokens out into logical blocks up front.
        self._append_tokens_to_blocks(prompt_token_ids)
        self.status = SequenceStatus.WAITING

    def _append_logical_block(self) -> None:
        # Each new block is numbered by its position in the list.
        self.logical_token_blocks.append(
            LogicalTokenBlock(
                block_number=len(self.logical_token_blocks),
                block_size=self.block_size,
            ))

    def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
        # Fill the trailing (possibly partial) block first, allocating fresh
        # blocks as needed until every token has a slot.
        pos = 0
        while pos < len(token_ids):
            if not self.logical_token_blocks:
                self._append_logical_block()

            tail = self.logical_token_blocks[-1]
            if tail.is_full():
                self._append_logical_block()
                tail = self.logical_token_blocks[-1]

            free = tail.get_num_empty_slots()
            tail.append_tokens(token_ids[pos:pos + free])
            pos += free

    def append_token_id(
        self,
        token_id: int,
        logprobs: Dict[int, float],
    ) -> None:
        """Append one generated token together with its logprob table."""
        assert token_id in logprobs
        self._append_tokens_to_blocks([token_id])
        self.output_logprobs.append(logprobs)
        self.data.append_token_id(token_id, logprobs[token_id])

    def get_len(self) -> int:
        return self.data.get_len()

    def get_prompt_len(self) -> int:
        return self.data.get_prompt_len()

    def get_output_len(self) -> int:
        return self.data.get_output_len()

    def get_token_ids(self) -> List[int]:
        return self.data.get_token_ids()

    def get_last_token_id(self) -> int:
        return self.data.get_last_token_id()

    def get_output_token_ids(self) -> List[int]:
        return self.data.output_token_ids

    def get_cumulative_logprob(self) -> float:
        return self.data.cumulative_logprob

    def get_beam_search_score(self,
                              length_penalty: float = 0.0,
                              seq_len: Optional[int] = None,
                              eos_token_id: Optional[int] = None) -> float:
        """Calculate the beam search score with length penalty.

        Adapted from

        https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
        """
        if seq_len is None:
            seq_len = self.get_len()
            # Note: HF implementation does not count the EOS token
            # towards the length, we align with that here for testing.
            if (eos_token_id is not None
                    and self.get_last_token_id() == eos_token_id):
                seq_len -= 1
        return self.get_cumulative_logprob() / (seq_len**length_penalty)

    def is_finished(self) -> bool:
        return SequenceStatus.is_finished(self.status)

    def fork(self, new_seq_id: int) -> "Sequence":
        """Deep-copy this sequence under a new sequence ID."""
        child = copy.deepcopy(self)
        child.seq_id = new_seq_id
        return child

    def __repr__(self) -> str:
        return (f"Sequence(seq_id={self.seq_id}, "
                f"status={self.status.name}, "
                f"num_blocks={len(self.logical_token_blocks)})")
Woosuk Kwon's avatar
Woosuk Kwon committed
210

Woosuk Kwon's avatar
Woosuk Kwon committed
211
212

class SequenceGroup:
    """A group of sequences that are generated from the same prompt.

    Args:
        request_id: The ID of the request.
        seqs: The list of sequences.
        sampling_params: The sampling parameters used to generate the outputs.
        arrival_time: The arrival time of the request.
    """

    def __init__(
        self,
        request_id: str,
        seqs: List[Sequence],
        sampling_params: SamplingParams,
        arrival_time: float,
    ) -> None:
        self.request_id = request_id
        # Index the sequences by ID for O(1) lookup.
        self.seqs_dict = {seq.seq_id: seq for seq in seqs}
        self.sampling_params = sampling_params
        self.arrival_time = arrival_time

    def get_max_num_running_seqs(self) -> int:
        """The maximum number of sequences running in parallel in the remaining
        lifetime of the request."""
        params = self.sampling_params
        if params.use_beam_search:
            # Beam search always keeps `best_of` candidates alive.
            return params.best_of
        if params.best_of > self.num_seqs():
            # Prompt stage: the group has not been expanded to `best_of`
            # sequences yet, but it will be during generation.
            return params.best_of
        # Generation stage: count the sequences actually running.
        return self.num_seqs(status=SequenceStatus.RUNNING)

    def get_seqs(
        self,
        status: Optional[SequenceStatus] = None,
    ) -> List[Sequence]:
        """Return the sequences, optionally filtered by status."""
        all_seqs = self.seqs_dict.values()
        if status is None:
            return list(all_seqs)
        return [seq for seq in all_seqs if seq.status == status]

    def get_finished_seqs(self) -> List[Sequence]:
        return [seq for seq in self.seqs_dict.values() if seq.is_finished()]

    def num_seqs(self, status: Optional[SequenceStatus] = None) -> int:
        return len(self.get_seqs(status))

    def find(self, seq_id: int) -> Sequence:
        """Look up a sequence by ID; raise ValueError when absent."""
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        return self.seqs_dict[seq_id]

    def add(self, seq: Sequence) -> None:
        """Add a sequence; duplicate IDs are rejected."""
        if seq.seq_id in self.seqs_dict:
            raise ValueError(f"Sequence {seq.seq_id} already exists.")
        self.seqs_dict[seq.seq_id] = seq

    def remove(self, seq_id: int) -> None:
        """Remove a sequence by ID; raise ValueError when absent."""
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        del self.seqs_dict[seq_id]

    def is_finished(self) -> bool:
        # Vacuously True for an empty group (all() over nothing).
        return all(seq.is_finished() for seq in self.get_seqs())

    def __repr__(self) -> str:
        return (f"SequenceGroup(request_id={self.request_id}, "
                f"sampling_params={self.sampling_params}, "
                f"num_seqs={len(self.seqs_dict)})")
290
291


292
class SequenceGroupMetadata:
    """Metadata for a sequence group. Used to create `InputMetadata`.

    Args:
        request_id: The ID of the request.
        is_prompt: Whether the request is at prompt stage.
        seq_data: The sequence data. (Seq id -> sequence data)
        sampling_params: The sampling parameters used to generate the outputs.
        block_tables: The block tables. (Seq id -> list of physical block
            numbers)
    """

    def __init__(
        self,
        request_id: str,
        is_prompt: bool,
        seq_data: Dict[int, SequenceData],
        sampling_params: SamplingParams,
        block_tables: Dict[int, List[int]],
    ) -> None:
        # Plain data holder: store every argument verbatim on the instance.
        self.request_id = request_id
        self.is_prompt = is_prompt
        self.seq_data = seq_data
        self.sampling_params = sampling_params
        self.block_tables = block_tables


class SequenceOutputs:
    """The model output associated with a sequence.

    Args:
        parent_seq_id: The ID of the parent sequence (for forking in beam
            search).
        output_token: The output token ID.
        logprobs: The logprobs of the output token.
            (Token id -> logP(x_i+1 | x_0, ..., x_i))
    """

    def __init__(
        self,
        parent_seq_id: int,
        output_token: int,
        logprobs: Dict[int, float],
    ) -> None:
        self.parent_seq_id = parent_seq_id
        self.output_token = output_token
        self.logprobs = logprobs

    def __repr__(self) -> str:
        # Fix: the closing parenthesis used to sit inside the string after
        # `output_token`, yielding a malformed repr with no final ")".
        return (f"SequenceOutputs(parent_seq_id={self.parent_seq_id}, "
                f"output_token={self.output_token}, "
                f"logprobs={self.logprobs})")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SequenceOutputs):
            # Fix: previously returned NotImplementedError() — a truthy
            # exception *instance* — so `obj == <non-SequenceOutputs>` was
            # truthy. NotImplemented lets Python fall back to the reflected
            # comparison / default identity semantics.
            return NotImplemented
        return (self.parent_seq_id == other.parent_seq_id
                and self.output_token == other.output_token
                and self.logprobs == other.logprobs)
352
353
354
355
356


# The sampler's return type: for each sequence group, a list of
# SequenceOutputs objects, each of which contains one possible candidate
# for the next token.
SamplerOutput = List[List[SequenceOutputs]]