"""Sequence and its related classes."""
import copy
import enum
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

from vllm.block import LogicalTokenBlock
from vllm.prefix import Prefix
from vllm.sampling_params import SamplingParams
from vllm.lora.request import LoRARequest

PromptLogprobs = List[Optional[Dict[int, float]]]
SampleLogprobs = List[Dict[int, float]]
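
# Shape note (illustrative, not from the original source): for a 3-token
# prompt, a PromptLogprobs value may look like
#   [None, {13: -1.2}, {42: -0.4}]
# (hypothetical token ids); the leading None reflects that the first prompt
# token has no preceding context to score against. SampleLogprobs holds one
# {token_id: logprob} dict per generated token.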


class SequenceStatus(enum.Enum):
    """Status of a sequence."""
    WAITING = enum.auto()
    RUNNING = enum.auto()
    SWAPPED = enum.auto()
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    @staticmethod
    def is_finished(status: "SequenceStatus") -> bool:
        return status in [
            SequenceStatus.FINISHED_STOPPED,
            SequenceStatus.FINISHED_LENGTH_CAPPED,
            SequenceStatus.FINISHED_ABORTED,
            SequenceStatus.FINISHED_IGNORED,
        ]

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Union[str, None]:
        if status == SequenceStatus.FINISHED_STOPPED:
            finish_reason = "stop"
        elif status == SequenceStatus.FINISHED_LENGTH_CAPPED:
            finish_reason = "length"
        elif status == SequenceStatus.FINISHED_ABORTED:
            finish_reason = "abort"
        elif status == SequenceStatus.FINISHED_IGNORED:
            # The ignored sequences are the sequences whose prompt lengths
            # are longer than the model's length cap. Therefore, the stop
            # reason should also be "length" as in the OpenAI API.
            finish_reason = "length"
        else:
            finish_reason = None
        return finish_reason
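
    # Example (illustrative): get_finished_reason(FINISHED_IGNORED) and
    # get_finished_reason(FINISHED_LENGTH_CAPPED) both return "length", so
    # callers that need to tell them apart must inspect the status itself.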


@dataclass
class RequestMetrics:
    """Metrics associated with a request.

    Args:
        arrival_time: The time when the request arrived.
        last_token_time: The time when the most recent token was generated.
        first_scheduled_time: The time when the request was first scheduled.
        first_token_time: The time when the first token was generated.
        time_in_queue: The time the request spent in the queue.
        finished_time: The time when the request was finished.
    """
    arrival_time: float
    last_token_time: float
    first_scheduled_time: Optional[float]
    first_token_time: Optional[float]
    time_in_queue: Optional[float]
    finished_time: Optional[float] = None
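
# Illustrative construction (a sketch; t0 is a hypothetical timestamp): only
# the arrival-related fields are known at admission, and the scheduler fills
# in the rest later:
#   RequestMetrics(arrival_time=t0, last_token_time=t0,
#                  first_scheduled_time=None, first_token_time=None,
#                  time_in_queue=None)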


class SequenceData:
    """Data associated with a sequence.

    Args:
        prompt_token_ids: The token IDs of the prompt.

    Attributes:
        prompt_token_ids: The token IDs of the prompt.
        output_token_ids: The token IDs of the output.
        cumulative_logprob: The cumulative log probability of the output.
    """

    def __init__(
        self,
        prompt_token_ids: List[int],
    ) -> None:
        self.prompt_token_ids = prompt_token_ids
        self.output_token_ids: List[int] = []
        self.cumulative_logprob = 0.0

    def append_token_id(self, token_id: int, logprob: float) -> None:
        self.output_token_ids.append(token_id)
        self.cumulative_logprob += logprob

    def get_len(self) -> int:
        return len(self.output_token_ids) + len(self.prompt_token_ids)

    def get_prompt_len(self) -> int:
        return len(self.prompt_token_ids)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)

    def get_token_ids(self) -> List[int]:
        return self.prompt_token_ids + self.output_token_ids

    def get_last_token_id(self) -> int:
        if not self.output_token_ids:
            return self.prompt_token_ids[-1]
        return self.output_token_ids[-1]
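
    # Illustrative behavior (sketch): with prompt_token_ids=[1, 2] and a
    # single append_token_id(7, logprob=-0.5):
    #   get_token_ids()     -> [1, 2, 7]
    #   get_last_token_id() -> 7
    #   cumulative_logprob  -> -0.5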

    def __repr__(self) -> str:
        return (f"SequenceData("
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"output_token_ids={self.output_token_ids}, "
                f"cumulative_logprob={self.cumulative_logprob})")


class Sequence:
    """Stores the data, status, and block information of a sequence.

    Args:
        seq_id: The ID of the sequence.
        prompt: The prompt of the sequence.
        prompt_token_ids: The token IDs of the prompt.
        block_size: The block size of the sequence. Should be the same as the
            block size used by the block manager and cache engine.
        lora_request: LoRA request.
    """

    def __init__(
        self,
        seq_id: int,
        prompt: str,
        prompt_token_ids: List[int],
        block_size: int,
        lora_request: Optional[LoRARequest] = None,
    ) -> None:
        self.seq_id = seq_id
        self.prompt = prompt
        self.block_size = block_size
        self.lora_request = lora_request

        self.data = SequenceData(prompt_token_ids)
        self.output_logprobs: SampleLogprobs = []
        self.output_text = ""

        self.logical_token_blocks: List[LogicalTokenBlock] = []
        # Initialize the logical token blocks with the prompt token ids.
        self._append_tokens_to_blocks(prompt_token_ids)
        self.status = SequenceStatus.WAITING

        # Used for incremental detokenization
        self.prefix_offset = 0
        self.read_offset = 0
        # Input + output tokens
        self.tokens: Optional[List[str]] = None

    @property
    def lora_int_id(self) -> int:
        return self.lora_request.lora_int_id if self.lora_request else 0

    def _append_logical_block(self) -> None:
        block = LogicalTokenBlock(
            block_number=len(self.logical_token_blocks),
            block_size=self.block_size,
        )
        self.logical_token_blocks.append(block)

    def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
        cursor = 0
        while cursor < len(token_ids):
            if not self.logical_token_blocks:
                self._append_logical_block()

            last_block = self.logical_token_blocks[-1]
            if last_block.is_full():
                self._append_logical_block()
                last_block = self.logical_token_blocks[-1]

            num_empty_slots = last_block.get_num_empty_slots()
            last_block.append_tokens(token_ids[cursor:cursor +
                                               num_empty_slots])
            cursor += num_empty_slots
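
    # Illustrative walk-through (sketch): with block_size=4, appending 6
    # prompt tokens fills block 0 and puts 2 tokens in block 1; a later
    # append uses block 1's remaining empty slots before a new logical
    # block is allocated.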

    def append_token_id(
        self,
        token_id: int,
        logprobs: Dict[int, float],
    ) -> None:
        assert token_id in logprobs
        self._append_tokens_to_blocks([token_id])
        self.output_logprobs.append(logprobs)
        self.data.append_token_id(token_id, logprobs[token_id])

    def get_len(self) -> int:
        return self.data.get_len()

    def get_prompt_len(self) -> int:
        return self.data.get_prompt_len()

    def get_output_len(self) -> int:
        return self.data.get_output_len()

    def get_token_ids(self) -> List[int]:
        return self.data.get_token_ids()

    def get_last_token_id(self) -> int:
        return self.data.get_last_token_id()

    def get_output_token_ids(self) -> List[int]:
        return self.data.output_token_ids

    def get_cumulative_logprob(self) -> float:
        return self.data.cumulative_logprob

    def get_beam_search_score(self,
                              length_penalty: float = 1.0,
                              seq_len: Optional[int] = None,
                              eos_token_id: Optional[int] = None) -> float:
        """Calculate the beam search score with length penalty.

        Adapted from

        https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
        """
        if seq_len is None:
            seq_len = self.get_len()
            # NOTE: The HF implementation does not count the EOS token
            # towards the length; we align with that here for testing.
            if (eos_token_id is not None
                    and self.get_last_token_id() == eos_token_id):
                seq_len -= 1
        return self.get_cumulative_logprob() / (seq_len**length_penalty)
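
    # Worked example (illustrative): a beam with cumulative logprob -6.0 and
    # seq_len=8 scores -6.0 / 8**1.0 = -0.75 at length_penalty=1.0; a larger
    # length_penalty divides by a bigger number, favoring longer beams.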

    def is_finished(self) -> bool:
        return SequenceStatus.is_finished(self.status)

    def fork(self, new_seq_id: int) -> "Sequence":
        new_seq = copy.deepcopy(self)
        new_seq.seq_id = new_seq_id
        return new_seq
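
    # Note: fork() deep-copies the sequence, so the child's logical blocks,
    # logprobs, and SequenceData are independent of the parent's; only the
    # seq_id is rewritten afterwards.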

    def __repr__(self) -> str:
        return (f"Sequence(seq_id={self.seq_id}, "
                f"status={self.status.name}, "
                f"num_blocks={len(self.logical_token_blocks)})")


class SequenceGroup:
    """A group of sequences that are generated from the same prompt.

    Args:
        request_id: The ID of the request.
        seqs: The list of sequences.
        sampling_params: The sampling parameters used to generate the outputs.
        arrival_time: The arrival time of the request.
        lora_request: LoRA request.
        prefix: The prefix of the prompt of the sequence group.
261
    """

    def __init__(
        self,
        request_id: str,
        seqs: List[Sequence],
        sampling_params: SamplingParams,
        arrival_time: float,
        lora_request: Optional[LoRARequest] = None,
        prefix: Optional[Prefix] = None,
    ) -> None:
        self.request_id = request_id
        self.seqs_dict = {seq.seq_id: seq for seq in seqs}
        self.sampling_params = sampling_params
        self.metrics = RequestMetrics(arrival_time=arrival_time,
                                      last_token_time=arrival_time,
                                      first_scheduled_time=None,
                                      first_token_time=None,
                                      time_in_queue=None)
        self.lora_request = lora_request
        self.prefix: Optional[Prefix] = prefix
        self.prompt_logprobs: Optional[PromptLogprobs] = None

    @property
    def prompt(self) -> str:
        # All sequences in the group should have the same prompt.
        # We use the prompt of an arbitrary sequence.
        return next(iter(self.seqs_dict.values())).prompt

    @property
    def prompt_token_ids(self) -> List[int]:
        # All sequences in the group should have the same prompt.
        # We use the prompt of an arbitrary sequence.
        return next(iter(self.seqs_dict.values())).data.prompt_token_ids

    @property
    def lora_int_id(self) -> int:
        return self.lora_request.lora_int_id if self.lora_request else 0

    def get_last_latency(self, now: float) -> float:
        """Gets last token latency for Request level timings."""
        latency = now - self.metrics.last_token_time
        self.metrics.last_token_time = now
        return latency
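
    # Illustrative timing (sketch): if the previous token arrived at t=10.0
    # and now=10.25, get_last_latency(10.25) returns 0.25 and resets
    # last_token_time to 10.25 for the next call.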

    def maybe_set_first_token_time(self, time: float) -> None:
        """Sets the first token time for Request level timings."""
        if self.metrics.first_token_time is None:
            self.metrics.first_token_time = time

    def maybe_set_first_scheduled_time(self, time: float) -> None:
        """Sets the first scheduled time and time in queue for Request level timings."""
        if self.metrics.first_scheduled_time is None:
            self.metrics.first_scheduled_time = time
            self.metrics.time_in_queue = time - self.metrics.arrival_time
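
    # Illustrative (sketch): a request with arrival_time=1.0 that is first
    # scheduled at time=1.8 records time_in_queue=0.8; later calls are
    # no-ops because first_scheduled_time is already set.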

    def set_finished_time(self, time: Optional[float]) -> None:
        """Sets the finished time for Request level timings."""
        self.metrics.finished_time = time

    def get_max_num_running_seqs(self) -> int:
        """The maximum number of sequences running in parallel in the remaining
        lifetime of the request."""
        if self.sampling_params.use_beam_search:
            # For beam search, maximally there will always be `best_of` beam
            # candidates running in the future.
            return self.sampling_params.best_of
        else:
            if self.sampling_params.best_of > self.num_seqs():
                # At prompt stage, the sequence group is not yet filled up
                # and only has one sequence running. However, in the
                # generation stage, we will have `best_of` sequences running.
                return self.sampling_params.best_of
            # At sampling stages, return the number of actual sequences
            # that are not finished yet.
            return self.num_unfinished_seqs()
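
    # Worked example (illustrative): with best_of=4 and beam search off, a
    # group still at prompt stage holds 1 sequence but reports 4, since
    # sampling will fork it into 4 candidates; once 3 of them finish, it
    # reports the 1 still-unfinished sequence.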

    def get_seqs(
        self,
        status: Optional[SequenceStatus] = None,
    ) -> List[Sequence]:
        if status is None:
            return list(self.seqs_dict.values())
        else:
            return [
                seq for seq in self.seqs_dict.values() if seq.status == status
            ]

    def get_unfinished_seqs(self) -> List[Sequence]:
        return [
            seq for seq in self.seqs_dict.values() if not seq.is_finished()
        ]

    def get_finished_seqs(self) -> List[Sequence]:
        return [seq for seq in self.seqs_dict.values() if seq.is_finished()]

    def num_seqs(self, status: Optional[SequenceStatus] = None) -> int:
        return len(self.get_seqs(status))

    def num_unfinished_seqs(self) -> int:
        return len(self.get_unfinished_seqs())

    def num_finished_seqs(self) -> int:
        return len(self.get_finished_seqs())

    def find(self, seq_id: int) -> Sequence:
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        return self.seqs_dict[seq_id]

    def add(self, seq: Sequence) -> None:
        if seq.seq_id in self.seqs_dict:
            raise ValueError(f"Sequence {seq.seq_id} already exists.")
        self.seqs_dict[seq.seq_id] = seq

    def remove(self, seq_id: int) -> None:
        if seq_id not in self.seqs_dict:
            raise ValueError(f"Sequence {seq_id} not found.")
        del self.seqs_dict[seq_id]

    def is_finished(self) -> bool:
        return all(seq.is_finished() for seq in self.get_seqs())

    def __repr__(self) -> str:
        return (f"SequenceGroup(request_id={self.request_id}, "
                f"sampling_params={self.sampling_params}, "
                f"num_seqs={len(self.seqs_dict)})")


class SequenceGroupMetadata:
    """Metadata for a sequence group. Used to create `InputMetadata`.

    Args:
        request_id: The ID of the request.
        is_prompt: Whether the request is at prompt stage.
        seq_data: The sequence data. (Seq id -> sequence data)
        sampling_params: The sampling parameters used to generate the outputs.
        block_tables: The block tables. (Seq id -> list of physical block
            numbers)
        lora_request: LoRA request.
        prefix: The prefix of the prompt of the sequence group.
    """

    def __init__(
        self,
        request_id: str,
        is_prompt: bool,
        seq_data: Dict[int, SequenceData],
        sampling_params: SamplingParams,
        block_tables: Dict[int, List[int]],
        lora_request: Optional[LoRARequest] = None,
        prefix: Optional[Prefix] = None,
    ) -> None:
        self.request_id = request_id
        self.is_prompt = is_prompt
        self.seq_data = seq_data
        self.sampling_params = sampling_params
        self.block_tables = block_tables
        self.lora_request = lora_request
        self.prefix = prefix

    @property
    def lora_int_id(self) -> int:
        return self.lora_request.lora_int_id if self.lora_request else 0
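
    # Illustrative shapes (sketch, hypothetical ids): a prompt-stage group
    # with a single sequence 0 whose KV cache sits in physical blocks 3, 7:
    #   seq_data     = {0: SequenceData(prompt_token_ids=[...])}
    #   block_tables = {0: [3, 7]}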


class SequenceOutput:
    """The model output associated with a sequence.

    Args:
        parent_seq_id: The ID of the parent sequence (for forking in beam
            search).
        output_token: The output token ID.
        logprobs: The logprobs of the output token.
            (Token id -> logP(x_i+1 | x_0, ..., x_i))
    """

    def __init__(
        self,
        parent_seq_id: int,
        output_token: int,
        logprobs: Dict[int, float],
    ) -> None:
        self.parent_seq_id = parent_seq_id
        self.output_token = output_token
        self.logprobs = logprobs

    def __repr__(self) -> str:
        return (f"SequenceOutput(parent_seq_id={self.parent_seq_id}, "
                f"output_token={self.output_token}, "
                f"logprobs={self.logprobs})")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SequenceOutput):
            raise NotImplementedError()
        return (self.parent_seq_id == other.parent_seq_id
                and self.output_token == other.output_token
                and self.logprobs == other.logprobs)


class SequenceGroupOutput:
    """The model output associated with a sequence group."""

    def __init__(
        self,
        samples: List[SequenceOutput],
        prompt_logprobs: Optional[PromptLogprobs],
    ) -> None:
        self.samples = samples
        self.prompt_logprobs = prompt_logprobs

    def __repr__(self) -> str:
        return (f"SequenceGroupOutput(samples={self.samples}, "
                f"prompt_logprobs={self.prompt_logprobs})")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SequenceGroupOutput):
            raise NotImplementedError()
        return (self.samples == other.samples
                and self.prompt_logprobs == other.prompt_logprobs)


# For each sequence group, we generate a list of SequenceOutput objects,
# each of which contains one possible candidate for the next token.
SamplerOutput = List[SequenceGroupOutput]
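
# Illustrative shape (sketch): a step that sampled two sequence groups yields
#   [SequenceGroupOutput(samples=[...], prompt_logprobs=...),
#    SequenceGroupOutput(samples=[...], prompt_logprobs=None)]
# i.e. one entry per scheduled group, in order.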