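"""Scheduler that manages sequence groups and their KV cache blocks."""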
import enum
import time
from typing import Dict, List, Optional, Tuple

from cacheflow.core.block_manager import BlockSpaceManager
from cacheflow.logger import init_logger
from cacheflow.core.policy import PolicyFactory
from cacheflow.sampling_params import SamplingParams
from cacheflow.sequence import (Sequence, SequenceGroup, SequenceGroupMetadata,
                                SequenceOutputs, SequenceStatus)

logger = init_logger(__name__)

_LOGGING_INTERVAL_SEC = 10


class PreemptionMode(enum.Enum):
    """Preemption modes.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU memory
    and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
    recompute them when the sequences are resumed, treating the sequences as
    new prompts.
    """
    SWAP = enum.auto()
    RECOMPUTE = enum.auto()


class Scheduler:
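    """Schedules sequence groups for execution.

    Sequence groups move between the WAITING, RUNNING, and SWAPPED states.
    At every step, the scheduler picks the groups to run next and records
    the cache blocks to swap in, swap out, or copy before model execution.
    """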

    def __init__(
        self,
        controllers: List,
        block_size: int,
        num_gpu_blocks: int,
        num_cpu_blocks: int,
        max_num_batched_tokens: int,
        max_num_sequences: int,
        log_stats: bool,
    ) -> None:
        self.controllers = controllers
        self.block_size = block_size
        self.num_gpu_blocks = num_gpu_blocks
        self.num_cpu_blocks = num_cpu_blocks
        self.max_num_batched_tokens = max_num_batched_tokens
        self.max_num_sequences = max_num_sequences
        self.log_stats = log_stats

        # Instantiate the scheduling policy.
        self.policy = PolicyFactory.get_policy(policy_name='fcfs')
        # Create the block space manager.
        self.block_manager = BlockSpaceManager(
            block_size=block_size,
            num_gpu_blocks=num_gpu_blocks,
            num_cpu_blocks=num_cpu_blocks,
        )

        # Sequence groups in the WAITING state.
        self.waiting: List[SequenceGroup] = []
        # Sequence groups in the RUNNING state.
        self.running: List[SequenceGroup] = []
        # Mapping: group_id -> num_steps.
        self.num_steps: Dict[int, int] = {}
        # Mapping: group_id -> sampling params.
        self.sampling_params: Dict[int, SamplingParams] = {}
        # Sequence groups in the SWAPPED state.
        self.swapped: List[SequenceGroup] = []

        self.last_logging_time: float = 0.0
        # List of (timestamp, num_tokens) tuples.
        self.num_input_tokens: List[Tuple[float, int]] = []

    def add_sequence_groups(
        self,
        seq_groups: List[Tuple[SequenceGroup, SamplingParams]],
    ) -> None:
        # Add sequence groups to the waiting queue.
        for seq_group, sampling_params in seq_groups:
            self.waiting.append(seq_group)
            self.sampling_params[seq_group.group_id] = sampling_params

    def _schedule(
        self,
    ) -> Tuple[Dict[int, int], Dict[int, int], Dict[int, List[int]], List[int]]:
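        # Returns the block mappings that the cache engine must apply before
        # the model runs, plus the ids of the groups scheduled as prompts:
        #   blocks_to_swap_in: CPU block number -> GPU block number.
        #   blocks_to_swap_out: GPU block number -> CPU block number.
        #   blocks_to_copy: src block number -> list of dst block numbers.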
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_in: Dict[int, int] = {}
        blocks_to_swap_out: Dict[int, int] = {}
        blocks_to_copy: Dict[int, List[int]] = {}

        # Fix the current time.
        now = time.time()

        # NOTE(woosuk): We prioritize the sequence groups in the RUNNING state
        # in order to minimize the preemption overheads.
        # Preemption happens only when there is no available slot to keep all
        # the sequence groups in the RUNNING state.
        # In this case, the policy is responsible for deciding which sequence
        # groups to preempt.
        self.running = self.policy.sort_by_priority(now, self.running)

        # Reserve new token slots for the running sequence groups.
        running: List[SequenceGroup] = []
        preempted: List[SequenceGroup] = []
        while self.running:
            seq_group = self.running.pop(0)
            while not self.block_manager.can_append_slot(seq_group):
                if self.running:
                    # Preempt the lowest-priority sequence groups.
                    victim_seq_group = self.running.pop(-1)
                    self._preempt(victim_seq_group, blocks_to_swap_out)
                    preempted.append(victim_seq_group)
                else:
                    # No other sequence groups can be preempted.
                    # Preempt the current sequence group.
                    self._preempt(seq_group, blocks_to_swap_out)
                    preempted.append(seq_group)
                    break
            else:
                # Append new slots to the sequence group.
                self._append_slot(seq_group, blocks_to_copy)
                running.append(seq_group)
        self.running = running

        # Swap in the sequence groups in the SWAPPED state if possible.
        self.swapped = self.policy.sort_by_priority(now, self.swapped)
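        # NOTE: Swap-in is attempted only when no blocks are being swapped
        # out in this step, because swap-in and swap-out must never happen
        # at the same time (see the assertion in step()).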
        while self.swapped and not blocks_to_swap_out:
            seq_group = self.swapped[0]
            # If the sequence group has been preempted in this step, stop.
            if seq_group in preempted:
                break
            # If the sequence group cannot be swapped in, stop.
            if not self.block_manager.can_swap_in(seq_group):
                break

            # The total number of sequences in the RUNNING state should not
            # exceed the maximum number of sequences.
            num_seqs = seq_group.num_seqs(status=SequenceStatus.SWAPPED)
            if len(self.running) + num_seqs > self.max_num_sequences:
                break

            seq_group = self.swapped.pop(0)
            self._swap_in(seq_group, blocks_to_swap_in)
            self._append_slot(seq_group, blocks_to_copy)
            self.running.append(seq_group)

        num_batched_tokens = sum(
            seq_group.num_seqs(status=SequenceStatus.RUNNING)
            for seq_group in self.running
        )
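        # NOTE: In the decoding phase, each sequence in the RUNNING state
        # takes exactly one input token per step, so the number of batched
        # tokens equals the number of running sequences.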

        # Join waiting sequences if possible.
        prompt_group_ids: List[int] = []
        # NOTE(woosuk): The sequence groups in the SWAPPED state are strictly
        # prioritized over the sequence groups in the WAITING state.
        # This is because we want to bound the amount of CPU memory taken by
        # the swapped sequence groups.
        if not self.swapped:
            # Optimization: We do not sort the waiting queue since the preempted
            # sequence groups are added to the front and the new sequence groups
            # are added to the back.
            while self.waiting:
                seq_group = self.waiting[0]
                # If the sequence group has been preempted in this step, stop.
                if seq_group in preempted:
                    break
                # If the sequence group cannot be allocated, stop.
                if not self.block_manager.can_allocate(seq_group):
                    break

                # If the number of batched tokens exceeds the limit, stop.
                num_prompt_tokens = seq_group.seqs[0].get_len()
                if (num_batched_tokens + num_prompt_tokens
                    > self.max_num_batched_tokens):
                    break

                # The total number of sequences in the RUNNING state should not
                # exceed the maximum number of sequences.
                num_seqs = seq_group.num_seqs(status=SequenceStatus.WAITING)
                if len(self.running) + num_seqs > self.max_num_sequences:
                    break

                seq_group = self.waiting.pop(0)
                self._allocate(seq_group)
                self.running.append(seq_group)
                num_batched_tokens += num_prompt_tokens
                prompt_group_ids.append(seq_group.group_id)

        if not self.log_stats:
            return (blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy,
                    prompt_group_ids)

        now = time.time()
        if num_batched_tokens > 0:
            self.num_input_tokens.append((now, num_batched_tokens))
        elapsed_time = now - self.last_logging_time
        if elapsed_time > _LOGGING_INTERVAL_SEC:
            self.last_logging_time = now
            self.num_input_tokens = [
                (t, n) for t, n in self.num_input_tokens
                if now - t < _LOGGING_INTERVAL_SEC
            ]
            if len(self.num_input_tokens) > 1:
                total_num_tokens = sum(n for _, n in self.num_input_tokens[:-1])
                window = now - self.num_input_tokens[0][0]
                avg_throughput = total_num_tokens / window
            else:
                avg_throughput = 0.0

            num_free_gpu_blocks = self.block_manager.get_num_free_gpu_blocks()
            num_used_gpu_blocks = self.num_gpu_blocks - num_free_gpu_blocks
            gpu_cache_usage = num_used_gpu_blocks / self.num_gpu_blocks
            if self.num_cpu_blocks > 0:
                num_free_cpu_blocks = self.block_manager.get_num_free_cpu_blocks()
                num_used_cpu_blocks = self.num_cpu_blocks - num_free_cpu_blocks
                cpu_cache_usage = num_used_cpu_blocks / self.num_cpu_blocks
            else:
                cpu_cache_usage = 0.0

            logger.info(
                f"Throughput: {avg_throughput:.1f} tokens/s, "
                f"Running: {len(self.running)} reqs, "
                f"Swapped: {len(self.swapped)} reqs, "
                f"Pending: {len(self.waiting)} reqs, "
                f"GPU KV cache usage: {gpu_cache_usage * 100:.1f}%, "
                f"CPU KV cache usage: {cpu_cache_usage * 100:.1f}%")

        return (blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy,
                prompt_group_ids)

    def step(self) -> List[SequenceGroup]:
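        """Performs one scheduling iteration.

        Schedules the sequence groups, builds the model inputs for the
        scheduled groups, and kicks off the first pipeline stage. Returns
        the sequence groups scheduled to run in this step.
        """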
        # Schedule sequence groups.
        # This function call changes the internal states of the scheduler
        # such as self.running, self.swapped, and self.waiting.
        (blocks_to_swap_in,
         blocks_to_swap_out,
         blocks_to_copy,
         prompt_group_ids) = self._schedule()

        # Create input data structures.
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
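        # Snapshot the scheduled groups; this list is returned to the caller
        # once the stage execution has been kicked off.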
        updated_seq_groups: List[SequenceGroup] = self.running.copy()

        for seq_group in self.running:
            group_id = seq_group.group_id
            is_prompt = group_id in prompt_group_ids

            input_tokens: Dict[int, List[int]] = {}
            seq_logprobs: Dict[int, float] = {}
            block_tables: Dict[int, List[int]] = {}
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                block_tables[seq_id] = self.block_manager.get_block_table(seq)
                if is_prompt:
                    input_tokens[seq_id] = seq.get_token_ids()
                else:
                    input_tokens[seq_id] = [seq.get_last_token_id()]
                seq_logprobs[seq_id] = seq.cumulative_logprobs
                # NOTE(woosuk): Sequences in the same group have the same
                # sequence length.
                seq_len = seq.get_len()

            seq_group_metadata = SequenceGroupMetadata(
                group_id=group_id,
                is_prompt=is_prompt,
                input_tokens=input_tokens,
                context_len=seq_len,
                seq_logprobs=seq_logprobs,
                sampling_params=self.sampling_params[group_id],
                block_tables=block_tables,
            )
            seq_group_metadata_list.append(seq_group_metadata)

        # Execute the first stage of the pipeline.
        if seq_group_metadata_list or blocks_to_swap_in or blocks_to_swap_out:
            # Swap in and swap out should never happen at the same time.
            assert not (blocks_to_swap_in and blocks_to_swap_out)
            self.controllers[0].execute_stage(
                seq_group_metadata_list,
                blocks_to_swap_in=blocks_to_swap_in,
                blocks_to_swap_out=blocks_to_swap_out,
                blocks_to_copy=blocks_to_copy,
            )

        return updated_seq_groups

    def post_step(
        self,
        seq_outputs: Dict[int, SequenceOutputs],
    ) -> None:
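        """Processes the model outputs of the last step.

        Appends the sampled tokens to the running sequences, handles beam
        search forks, and frees the sequences and groups that have finished.
        """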
        # Update the running sequences and free blocks.
        for seq_group in self.running:
            group_id = seq_group.group_id
            self.num_steps[group_id] += 1
            stop_token_ids = self.sampling_params[group_id].stop_token_ids

            # Process beam search results before processing the next tokens.
            for seq in seq_group.seqs:
                if seq.status == SequenceStatus.FINISHED:
                    continue

                output = seq_outputs[seq.seq_id]
                if seq.seq_id != output.parent_seq_id:
                    # The sequence is a fork of the parent sequence (beam search).
                    # Free the current sequence.
                    self.block_manager.free(seq)
                    # Fork the parent sequence.
                    parent_seq = seq_group.find(output.parent_seq_id)
                    parent_seq.fork(seq)
                    self.block_manager.fork(parent_seq, seq)

            # Process the next tokens.
            for seq in seq_group.seqs:
                if seq.status == SequenceStatus.FINISHED:
                    continue

                # Append a new token to the sequence.
                output = seq_outputs[seq.seq_id]
                seq.append_token(output.output_token, output.logprobs)

                # Check if the sequence has generated a stop token.
                if output.output_token in stop_token_ids:
                    self._free_seq(seq)
                    continue

                # Check if the sequence has reached the maximum number of steps.
                max_num_steps = self.sampling_params[group_id].max_num_steps
                if self.num_steps[group_id] == max_num_steps:
                    self._free_seq(seq)
                    continue

        # Update the running sequences.
        running: List[SequenceGroup] = []
        for seq_group in self.running:
            if seq_group.is_finished():
                self._free_seq_group(seq_group)
            else:
                running.append(seq_group)
        self.running = running

    def _allocate(self, seq_group: SequenceGroup) -> None:
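        # Allocate the physical blocks for the prompt and move every sequence
        # in the group to the RUNNING state.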
        self.block_manager.allocate(seq_group)
        for seq in seq_group.seqs:
            seq.status = SequenceStatus.RUNNING
        if seq_group.group_id not in self.num_steps:
            self.num_steps[seq_group.group_id] = 0

    def _append_slot(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            ret = self.block_manager.append_slot(seq)
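            # append_slot returns a (src_block, dst_block) pair when the last
            # block is shared with another sequence: the block must be copied
            # to the freshly allocated one (copy-on-write) before writing.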
            if ret is not None:
                src_block, dst_block = ret
                if src_block in blocks_to_copy:
                    blocks_to_copy[src_block].append(dst_block)
                else:
                    blocks_to_copy[src_block] = [dst_block]

    def _preempt(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
        preemption_mode: Optional[PreemptionMode] = None,
    ) -> None:
        # If preemption mode is not specified, we determine the mode as follows:
        # We use recomputation by default since it incurs lower overhead than
        # swapping. However, when the sequence group has multiple sequences
        # (e.g., beam search), recomputation is not supported. In such a case,
        # we use swapping instead.
        # FIXME(woosuk): This makes our scheduling policy a bit bizarre.
        # As swapped sequences are prioritized over waiting sequences,
        # sequence groups with multiple sequences are implicitly prioritized
        # over sequence groups with a single sequence.
        # TODO(woosuk): Support recomputation for sequence groups with multiple
        # sequences. This may require a more sophisticated CUDA kernel.
        if preemption_mode is None:
            seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
            if len(seqs) == 1:
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP
        if preemption_mode == PreemptionMode.RECOMPUTE:
            self._preempt_by_recompute(seq_group)
        elif preemption_mode == PreemptionMode.SWAP:
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            assert False, 'Invalid preemption mode.'

    def _preempt_by_recompute(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        assert len(seqs) == 1
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            self.block_manager.free(seq)
        # NOTE: For FCFS, we insert the preempted sequence group to the front
        # of the waiting queue.
        self.waiting.insert(0, seq_group)

    def _preempt_by_swap(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        for seq in seqs:
            seq.status = SequenceStatus.SWAPPED
        self._swap_out(seq_group, blocks_to_swap_out)
        self.swapped.append(seq_group)

    def _free_seq(self, seq: Sequence) -> None:
        seq.status = SequenceStatus.FINISHED
        self.block_manager.free(seq)

    def _free_seq_group(self, seq_group: SequenceGroup) -> None:
        group_id = seq_group.group_id
        del self.num_steps[group_id]
        del self.sampling_params[group_id]

    def _swap_in(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_in: Dict[int, int],
    ) -> None:
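        # The mapping goes from the CPU block number of each swapped block to
        # the GPU block number it is brought back into.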
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING

    def _swap_out(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: Dict[int, int],
    ) -> None:
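        # The mapping goes from the GPU block number of each evicted block to
        # the CPU block number it is saved into.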
        assert self.block_manager.can_swap_out(seq_group)
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED