from __future__ import annotations

# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Store information about requests and batches.

The following is the flow of data structures for a batch:

ScheduleBatch -> ModelWorkerBatch -> ForwardBatch

- ScheduleBatch is managed by `scheduler.py::Scheduler`.
  It contains high-level scheduling data. Most of the data is on the CPU.
- ModelWorkerBatch is managed by `tp_worker.py::TpModelWorker`.
  It is a subset of `ScheduleBatch` that only contains data related to the model forward on GPU.
  It is transferred from the CPU scheduler to the GPU model runner.
- ForwardBatch is managed by `model_runner.py::ModelRunner`.
  It contains low-level tensor data. Most of the data consists of GPU tensors.

TODO(lmzheng): ModelWorkerBatch seems a bit redundant; we may remove it in the future.
"""

import copy
import dataclasses
import logging
import threading
from enum import Enum, auto
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple, Union

import numpy as np
import torch
import triton
import triton.language as tl

from sglang.global_config import global_config
from sglang.srt.constrained.base_grammar_backend import BaseGrammarObject
from sglang.srt.disaggregation.base import BaseKVSender
from sglang.srt.disaggregation.decode_schedule_batch_mixin import (
    ScheduleBatchDisaggregationDecodeMixin,
)
from sglang.srt.distributed.parallel_state import get_tensor_model_parallel_rank
from sglang.srt.mem_cache.allocator import (
    BaseTokenToKVPoolAllocator,
    SWATokenToKVPoolAllocator,
)
from sglang.srt.mem_cache.base_prefix_cache import BasePrefixCache
from sglang.srt.mem_cache.chunk_cache import ChunkCache, SWAChunkCache
from sglang.srt.mem_cache.memory_pool import ReqToTokenPool
from sglang.srt.mem_cache.swa_radix_cache import SWARadixCache
from sglang.srt.metrics.collector import TimeStats
from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode, ForwardMode
from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
from sglang.srt.sampling.sampling_params import SamplingParams
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import flatten_nested_list, support_triton

if TYPE_CHECKING:
    from sglang.srt.configs.model_config import ModelConfig
    from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
    from sglang.srt.speculative.spec_info import SpeculativeAlgorithm

INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5

GLOBAL_SERVER_ARGS_KEYS = [
    "attention_backend",
    "mm_attention_backend",
    "debug_tensor_dump_inject",
    "debug_tensor_dump_output_folder",
    "chunked_prefill_size",
    "device",
    "disable_chunked_prefix_cache",
    "disable_radix_cache",
    "enable_dp_attention",
    "enable_two_batch_overlap",
    "enable_dp_lm_head",
    "moe_a2a_backend",
    "deepep_mode",
    "enable_flashinfer_cutlass_moe",
    "enable_flashinfer_trtllm_moe",
    "enable_flashinfer_allreduce_fusion",
    "moe_dense_tp_size",
    "ep_dispatch_algorithm",
    "deepep_config",
    "ep_num_redundant_experts",
    "enable_nan_detection",
    "flashinfer_mla_disable_ragged",
    "max_micro_batch_size",
    "disable_shared_experts_fusion",
    "sampling_backend",
    "speculative_accept_threshold_single",
    "speculative_accept_threshold_acc",
    "torchao_config",
    "triton_attention_reduce_in_fp32",
    "num_reserved_decode_tokens",
    "weight_loader_disable_mmap",
    "enable_triton_kernel_moe",
    "enable_multimodal",
    "enable_symm_mem",
    "quantization",
]

# Put some global args for easy access
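# Note: these entries start as the `ServerArgs` class defaults; at runtime the
# scheduler is expected to overwrite them with the values of the live server
# arguments (see scheduler.py).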
global_server_args_dict = {k: getattr(ServerArgs, k) for k in GLOBAL_SERVER_ARGS_KEYS}

logger = logging.getLogger(__name__)


class BaseFinishReason:
    def __init__(self, is_error: bool = False):
        self.is_error = is_error

    def to_json(self):
        raise NotImplementedError()


class FINISH_MATCHED_TOKEN(BaseFinishReason):
    def __init__(self, matched: Union[int, List[int]]):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_MATCHED_STR(BaseFinishReason):
    def __init__(self, matched: str):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_LENGTH(BaseFinishReason):
    def __init__(self, length: int):
        super().__init__()
        self.length = length

    def to_json(self):
        return {
            "type": "length",  # to match OpenAI API's return value
            "length": self.length,
        }


class FINISH_ABORT(BaseFinishReason):
    def __init__(self, message=None, status_code=None, err_type=None):
        super().__init__(is_error=True)
        self.message = message or "Aborted"
        self.status_code = status_code
        self.err_type = err_type

    def to_json(self):
        return {
            "type": "abort",
            "message": self.message,
            "status_code": self.status_code,
            "err_type": self.err_type,
        }


class Modality(Enum):
    IMAGE = auto()
    MULTI_IMAGES = auto()
    VIDEO = auto()
    AUDIO = auto()

    @staticmethod
    def from_str(modality_str: str):
        try:
            return Modality[modality_str.upper()]
        except KeyError:
            raise ValueError(
                f"Invalid modality string: {modality_str}. Valid modalities are: {[m.name for m in Modality]}"
            )

    @staticmethod
    def all():
        return [Modality.IMAGE, Modality.VIDEO, Modality.AUDIO]


@dataclasses.dataclass
class MultimodalDataItem:
    """
    One MultimodalDataItem contains all inputs for one modality.
    For example, if there are 3 image inputs and 1 audio input, there will be 2 MultimodalDataItems:
    one for the images and one for the audio.

    We put the common fields first and the model-specific fields in model_specific_data.
    """

    modality: Modality
    hash: int = None
    pad_value: int = None
    offsets: Optional[list] = None

    # the raw features returned by processor, e.g. pixel_values or audio_features
    feature: Union[torch.Tensor, np.ndarray] = None
    # the precomputed embeddings, passed as final encoder embeddings
    # Exactly one of `feature` and `precomputed_embeddings` is set; the other one stays empty (None)
    precomputed_embeddings: Optional[Union[torch.Tensor, np.ndarray]] = None

    # Model-specific data stored in a dictionary
    model_specific_data: dict[str, Any] = dataclasses.field(default_factory=dict)

    def __getattr__(self, name: str):
        if (
            "model_specific_data" in self.__dict__
            and name in self.__dict__["model_specific_data"]
        ):
            return self.__dict__["model_specific_data"][name]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )

    def __setitem__(self, key: str, value: Any):
        if key in self.__dict__:
            self.__dict__[key] = value
        else:
            self.model_specific_data[key] = value

    def set(self, key: str, value: Any):
        self.__setitem__(key, value)
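
    # Illustrative usage (hypothetical field name): keys that are not declared
    # dataclass fields are routed to `model_specific_data` by `set`/`__setitem__`
    # and read back through `__getattr__`, e.g.
    #   item = MultimodalDataItem(modality=Modality.IMAGE)
    #   item.set("image_grid_thw", grid)  # stored in model_specific_data
    #   item.image_grid_thw               # resolved by __getattr__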

    @staticmethod
    def is_empty_list(l):
        if l is None:
            return True
        return len([item for item in flatten_nested_list(l) if item is not None]) == 0

    def set_pad_value(self):
        """
        Set the pad value after first hashing the data
        """
        from sglang.srt.managers.mm_utils import hash_feature

        if self.hash is None:
            if self.feature is not None:
                hashed_feature = self.feature
            else:
                hashed_feature = self.precomputed_embeddings
            self.hash = hash_feature(hashed_feature)
        assert self.hash is not None
        self.pad_value = self.hash % (1 << 30)

    def is_modality(self, modality: Modality) -> bool:
        return self.modality == modality

    def is_audio(self):
        return self.modality == Modality.AUDIO

    def is_image(self):
        return self.modality in [Modality.IMAGE, Modality.MULTI_IMAGES]

    def is_video(self):
        return self.modality == Modality.VIDEO

    def is_valid(self) -> bool:
        return self.is_image() or self.is_video() or self.is_audio()

    def validate(self):
        ...
        # TODO

    @staticmethod
    def from_dict(obj: dict):
        kwargs = dict(obj)
        modality = kwargs.pop("modality")
        if isinstance(modality, str):
            modality = Modality[modality]
        ret = MultimodalDataItem(modality=modality, **kwargs)
        ret.validate()
        return ret

    def merge(self, other):
        self.feature += other.feature
        self.offsets += other.offsets
        self.hash = hash((self.hash, other.hash))
        self.set_pad_value()


@dataclasses.dataclass
class MultimodalInputs:
    """The multimodal data related inputs."""

    # items of data
    mm_items: List[MultimodalDataItem]
    image_pad_len: Optional[list] = None
    num_image_tokens: Optional[int] = None

    # image
    im_token_id: Optional[int] = None
    im_start_id: Optional[int] = None
    im_end_id: Optional[int] = None
    slice_start_id: Optional[int] = None
    slice_end_id: Optional[int] = None

    # video
    video_token_id: Optional[int] = None

    # audio
    audio_token_id: Optional[int] = None
    audio_start_id: Optional[int] = None
    audio_end_id: Optional[int] = None

    # QWen2-VL related
    mrope_positions: Optional[torch.Tensor] = None
    mrope_position_delta: Optional[torch.Tensor] = None

    @staticmethod
    def from_dict(obj: dict):
        ret = MultimodalInputs(
            mm_items=obj["mm_items"],
        )

        assert isinstance(ret.mm_items, list)
        ret.mm_items = [item for item in ret.mm_items if item.is_valid()]
        for item in ret.mm_items:
            item.set_pad_value()

        optional_args = [
            "mrope_positions",
            "mrope_position_delta",
            "im_token_id",
            "im_start_id",
            "im_end_id",
            "video_token_id",
            "slice_start_id",
            "slice_end_id",
            "audio_start_id",
            "audio_end_id",
            "audio_token_id",
        ]
        for arg in optional_args:
            if arg in obj:
                setattr(ret, arg, obj[arg])

        return ret

    def contains_image_inputs(self) -> bool:
        return any(item.is_image() for item in self.mm_items)

    def contains_video_inputs(self) -> bool:
        return any(item.is_video() for item in self.mm_items)

    def contains_audio_inputs(self) -> bool:
        return any(item.is_audio() for item in self.mm_items)

    def contains_mm_input(self) -> bool:
        return any(item.is_valid() for item in self.mm_items)

    def merge(self, other: MultimodalInputs):
        """
        Merge multimodal inputs when requests are being merged.
        """

        # args that need to be merged
        optional_args = [
            "mm_items",
            "image_pad_len",
        ]
        for arg in optional_args:
            self_arg = getattr(self, arg, None)
            if self_arg is not None:
                setattr(self, arg, self_arg + getattr(other, arg))

        mrope_positions = self.mrope_positions
        if mrope_positions is not None:
            if other.mrope_positions is None:
                self.mrope_positions = mrope_positions
            else:
                self.mrope_positions = torch.cat(
                    [self.mrope_positions, other.mrope_positions], dim=1
                )

        mrope_position_delta = self.mrope_position_delta
        if mrope_position_delta is not None:
            if other.mrope_position_delta is None:
                self.mrope_position_delta = mrope_position_delta
            else:
                self.mrope_position_delta = torch.cat(
                    [self.mrope_position_delta, other.mrope_position_delta], dim=0
                )

        for key, val in other.__dict__.items():
            if "_id" in key:
                # set token_ids
                if getattr(self, key, None) is None:
                    setattr(self, key, getattr(other, key, None))
        # other args are kept intact


class Req:
    """The input and output status of a request."""

    def __init__(
        self,
        rid: str,
        origin_input_text: str,
        origin_input_ids: List[int],
        sampling_params: SamplingParams,
        return_logprob: bool = False,
        top_logprobs_num: int = 0,
        token_ids_logprob: List[int] = None,
        stream: bool = False,
        origin_input_ids_unpadded: Optional[Tuple[int]] = None,
        lora_id: Optional[str] = None,
        input_embeds: Optional[List[List[float]]] = None,
        token_type_ids: List[int] = None,
        session_id: Optional[str] = None,
        custom_logit_processor: Optional[str] = None,
        return_hidden_states: bool = False,
        eos_token_ids: Optional[Set[int]] = None,
        bootstrap_host: Optional[str] = None,
        bootstrap_port: Optional[int] = None,
        bootstrap_room: Optional[int] = None,
        data_parallel_rank: Optional[int] = None,
        vocab_size: Optional[int] = None,
    ):
        # Input and output info
        self.rid = rid
        self.origin_input_text = origin_input_text
        self.origin_input_ids_unpadded = (
            origin_input_ids_unpadded
            if origin_input_ids_unpadded
            else origin_input_ids  # Before image padding
        )
        self.origin_input_ids = origin_input_ids
        # Each decode stage's output ids
        self.output_ids = []
        # fill_ids = origin_input_ids + output_ids. Updated if chunked.
        self.fill_ids = []
        self.session_id = session_id
        self.input_embeds = input_embeds

        # For cross-encoder models
        self.token_type_ids = token_type_ids

        # The length of KV that has been removed in local attention chunked prefill
        self.evicted_seqlen_local = 0

        # Sampling info
        if isinstance(sampling_params.custom_params, dict):
            sampling_params = copy.copy(sampling_params)
            sampling_params.custom_params = sampling_params.custom_params | {
                "__req__": self
            }
        self.sampling_params = sampling_params
        self.custom_logit_processor = custom_logit_processor
        self.return_hidden_states = return_hidden_states
        self.lora_id = lora_id

        # Memory pool info
        self.req_pool_idx: Optional[int] = None

        # Check finish
        self.tokenizer = None
        self.finished_reason = None
        # Whether this request has finished output
        self.finished_output = None
        # If we want to abort the request in the middle of the event loop, set this to true
        # Note: we should never set finished_reason in the middle of the event loop; the req would get filtered out and never respond
        self.to_abort = False
        # This carries the error message for `.to_abort` and will be attached to the finished_reason at the end of the event loop
        self.to_abort_message: str = None
        self.stream = stream
        self.eos_token_ids = eos_token_ids
        self.vocab_size = vocab_size

        # For incremental decoding
        # ----- | --------- read_ids -------|
        # ----- |   surr_ids  |
        # xxxxx | xxxxxxxxxxx | xxxxxxxxxxx |
        # ----- ^ ----------- ^ ----------- ^
        # ----- 1 ----------- 2 ----------- 3
        # 1: surr_offset
        # 2: read_offset
        # 3: last token
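        # The detokenizer decodes all_ids[surr_offset:] and only surfaces the text
        # after (read_offset - surr_offset), so tokens that straddle the boundary
        # are detokenized consistently (see init_incremental_detokenize below).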
        self.surr_offset = None  # Surrounding offset to defeat the cleanup algorithm
        self.read_offset = None
        self.decoded_text = ""

        # For multimodal inputs
        self.multimodal_inputs: Optional[MultimodalInputs] = None

        # Prefix info
        # The indices to kv cache for the shared prefix.
        self.prefix_indices: torch.Tensor = []
        # Number of tokens to run prefill.
        self.extend_input_len = 0
        # The relative logprob_start_len in an extend batch
        self.extend_logprob_start_len = 0
        self.last_node: Any = None
        self.last_host_node: Any = None
        self.host_hit_length = 0
        # The node to lock up to, for the SWA radix tree lock ref
        self.swa_uuid_for_lock: Optional[int] = None

        # Whether the request is chunked. It is incremented whenever
        # the request is chunked, and decremented whenever a chunked
        # request is processed.
        self.is_chunked = 0

        # For retraction
        self.is_retracted = False

        # Incremental streaming
        self.send_token_offset: int = 0
        self.send_decode_id_offset: int = 0
        # TODO (Byron): send_output_token_logprobs_offset and send_decode_id_offset can be different in disaggregation mode
        # because the decode server does not have the first output token logprobs
        self.send_output_token_logprobs_offset: int = 0

        # Logprobs (arguments)
        self.return_logprob = return_logprob
        # Start index to compute logprob from.
        self.logprob_start_len = 0
        self.top_logprobs_num = top_logprobs_num
        self.token_ids_logprob = token_ids_logprob
        self.temp_scaled_logprobs = False
        self.top_p_normalized_logprobs = False

        # Logprobs (return values)
        # True means the input logprob has already been sent to the detokenizer.
        self.input_logprob_sent: bool = False
        self.input_token_logprobs_val: Optional[List[float]] = None
        self.input_token_logprobs_idx: Optional[List[int]] = None
        self.input_top_logprobs_val: Optional[List[float]] = None
        self.input_top_logprobs_idx: Optional[List[int]] = None
        self.input_token_ids_logprobs_val: Optional[List[float]] = None
        self.input_token_ids_logprobs_idx: Optional[List[int]] = None
        # Temporary holder to store input_token_logprobs.
        self.input_token_logprobs: Optional[List[Tuple[int]]] = None
        self.temp_input_top_logprobs_val: Optional[List[torch.Tensor]] = None
        self.temp_input_top_logprobs_idx: Optional[List[int]] = None
        self.temp_input_token_ids_logprobs_val: Optional[List[float]] = None
        self.temp_input_token_ids_logprobs_idx: Optional[List[int]] = None

        if return_logprob:
            # shape: (bs, 1)
            self.output_token_logprobs_val = []
            self.output_token_logprobs_idx = []
            # shape: (bs, k)
            self.output_top_logprobs_val = []
            self.output_top_logprobs_idx = []
            self.output_token_ids_logprobs_val = []
            self.output_token_ids_logprobs_idx = []
        else:
            self.output_token_logprobs_val = self.output_token_logprobs_idx = (
                self.output_top_logprobs_val
            ) = self.output_top_logprobs_idx = self.output_token_ids_logprobs_val = (
                self.output_token_ids_logprobs_idx
            ) = None
        self.hidden_states: List[List[float]] = []
        self.hidden_states_tensor = None  # Note: use tensor instead of list to transfer hidden_states when PD + MTP

        # Embedding (return values)
        self.embedding = None

        # Constrained decoding
        self.grammar: Optional[BaseGrammarObject] = None
        self.grammar_wait_ct = 0

        # The number of tokens that were already cached in the KV cache
        self.cached_tokens = 0
        self.already_computed = 0

        # The number of verification forward passes in the speculative decoding.
        # This is used to compute the average acceptance length per request.
        self.spec_verify_ct = 0

        # For metrics
        self.time_stats: TimeStats = TimeStats()
        self.has_log_time_stats: bool = False
        self.queue_time_start = None
        self.queue_time_end = None

        # For disaggregation
        self.bootstrap_host: str = bootstrap_host
        self.bootstrap_port: Optional[int] = bootstrap_port
        self.bootstrap_room: Optional[int] = bootstrap_room
        self.disagg_kv_sender: Optional[BaseKVSender] = None

        # For data parallel rank routing
        self.data_parallel_rank: Optional[int] = data_parallel_rank

        # the start index of the sent kv cache
        # We want to send it chunk by chunk for chunked prefill.
        # After every chunk forward, we do the following:
        # kv_send(req.input_ids[req.start_send_idx:len(req.fill_ids)])
        # start_send_idx = len(req.fill_ids)
        self.start_send_idx: int = 0

        # For the overlap schedule, we delay the kv transfer until `process_batch_result_disagg_prefill`,
        # rather than `process_prefill_chunk` as in the non-overlap case, because the kv cache is not ready yet in `process_prefill_chunk`.
        # We use `tmp_end_idx` to store the end index of the kv cache to send.
        self.tmp_end_idx: int = -1
        self.metadata_buffer_index: int = -1

    @property
    def seqlen(self):
        return len(self.origin_input_ids) + len(self.output_ids)

    def extend_image_inputs(self, image_inputs):
        if self.multimodal_inputs is None:
            self.multimodal_inputs = image_inputs
        else:
            self.multimodal_inputs.merge(image_inputs)

    def finished(self) -> bool:
        # Whether the request has reached a finish condition
        return self.finished_reason is not None

    def init_next_round_input(
        self,
        tree_cache: Optional[BasePrefixCache] = None,
    ):
        self.fill_ids = self.origin_input_ids + self.output_ids
        if tree_cache is not None:
            (
                self.prefix_indices,
                self.last_node,
                self.last_host_node,
                self.host_hit_length,
            ) = tree_cache.match_prefix(
                key=self.adjust_max_prefix_ids(),
            )
        self.extend_input_len = len(self.fill_ids) - len(self.prefix_indices)

    def adjust_max_prefix_ids(self):
        self.fill_ids = self.origin_input_ids + self.output_ids
        input_len = len(self.fill_ids)

        # FIXME: To work around some bugs in logprob computation, we need to ensure each
        # request has at least one token. Later, we can relax this requirement and use `input_len`.
        max_prefix_len = input_len - 1

        if self.sampling_params.max_new_tokens > 0:
            # Need at least one token to compute logits
            max_prefix_len = min(max_prefix_len, input_len - 1)

        if self.return_logprob:
            max_prefix_len = min(max_prefix_len, self.logprob_start_len)

        max_prefix_len = max(max_prefix_len, 0)
        return self.fill_ids[:max_prefix_len]

    # Based on https://github.com/vllm-project/vllm/blob/7a64d24aad69e4d2548aa0bf528d9fe63428ab01/vllm/transformers_utils/detokenizer.py#L194-L313
    def init_incremental_detokenize(self):
        first_iter = self.surr_offset is None or self.read_offset is None

        if first_iter:
            self.read_offset = len(self.origin_input_ids_unpadded)
            self.surr_offset = max(
                self.read_offset - INIT_INCREMENTAL_DETOKENIZATION_OFFSET, 0
            )

        all_ids = self.origin_input_ids_unpadded + self.output_ids
        return all_ids[self.surr_offset :], self.read_offset - self.surr_offset

    def check_finished(self):
        if self.finished():
            return

        if self.to_abort:
            self.finished_reason = FINISH_ABORT(
                message=self.to_abort_message,
            )
            return

        if len(self.output_ids) >= self.sampling_params.max_new_tokens:
            self.finished_reason = FINISH_LENGTH(
                length=self.sampling_params.max_new_tokens
            )
            return

        if self.grammar is not None:
            if self.grammar.is_terminated():
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=self.output_ids[-1])
                return

        last_token_id = self.output_ids[-1]

        if not self.sampling_params.ignore_eos:
            matched_eos = False

            # Check stop token ids
            if self.sampling_params.stop_token_ids:
                matched_eos = last_token_id in self.sampling_params.stop_token_ids
            if self.eos_token_ids:
                matched_eos |= last_token_id in self.eos_token_ids
            if self.tokenizer is not None:
                matched_eos |= last_token_id == self.tokenizer.eos_token_id
                if self.tokenizer.additional_stop_token_ids:
                    matched_eos |= (
                        last_token_id in self.tokenizer.additional_stop_token_ids
                    )
            if matched_eos:
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=last_token_id)
                return

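        # A sampled token id outside the vocabulary range usually indicates
        # corrupted logits (e.g. NaN); finish the request instead of decoding it.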
        if last_token_id > self.vocab_size or last_token_id < 0:
            if self.sampling_params.stop_token_ids:
                self.output_ids[-1] = next(iter(self.sampling_params.stop_token_ids))
            if self.eos_token_ids:
                self.output_ids[-1] = next(iter(self.eos_token_ids))
            self.finished_reason = FINISH_MATCHED_STR(matched="NaN happened")
            return

        # Check stop strings
        if len(self.sampling_params.stop_strs) > 0:
            tail_str = self.tokenizer.decode(
                self.output_ids[-(self.sampling_params.stop_str_max_len + 1) :]
            )

            for stop_str in self.sampling_params.stop_strs:
                if stop_str in tail_str or stop_str in self.decoded_text:
                    self.finished_reason = FINISH_MATCHED_STR(matched=stop_str)
                    return

    def reset_for_retract(self):
        self.prefix_indices = []
        self.last_node = None
        self.swa_uuid_for_lock = None
        self.extend_input_len = 0
        self.is_retracted = True
        self.input_token_logprobs = None
        self.temp_input_top_logprobs_val = None
        self.temp_input_top_logprobs_idx = None
        self.extend_logprob_start_len = 0
        self.is_chunked = 0
        self.req_pool_idx = None
        self.already_computed = 0

    def offload_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        self.kv_cache_cpu = token_to_kv_pool_allocator.get_cpu_copy(token_indices)

    def load_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        token_to_kv_pool_allocator.load_cpu_copy(self.kv_cache_cpu, token_indices)
        del self.kv_cache_cpu

    def log_time_stats(self):
        # If overlap schedule, we schedule one decode batch ahead so this gets called twice.
        if self.has_log_time_stats is True:
            return

        if self.bootstrap_room is not None:
            prefix = f"Req Time Stats(rid={self.rid}, bootstrap_room={self.bootstrap_room}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        else:
            prefix = f"Req Time Stats(rid={self.rid}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        logger.info(f"{prefix}: {self.time_stats}")
        self.has_log_time_stats = True

    def set_finish_with_abort(self, error_msg: str):
        if get_tensor_model_parallel_rank() == 0:
            logger.error(f"{error_msg}, {self.rid=}")
        self.multimodal_inputs = None
        self.grammar = None
        self.origin_input_ids = [0]  # set it to one token to skip the long prefill
        self.return_logprob = False
        self.finished_reason = FINISH_ABORT(
            error_msg, HTTPStatus.BAD_REQUEST, "BadRequestError"
        )

    def __repr__(self):
        return (
            f"Req(rid={self.rid}, "
            f"input_ids={self.origin_input_ids}, output_ids={self.output_ids}, "
            f"{self.grammar=}, "
            f"{self.sampling_params=})"
        )


# Batch id
bid = 0


@dataclasses.dataclass
class ScheduleBatch(ScheduleBatchDisaggregationDecodeMixin):
    """Store all information of a batch on the scheduler."""

    # Request, memory pool, and cache
    reqs: List[Req]
    req_to_token_pool: ReqToTokenPool = None
    token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator = None
    tree_cache: BasePrefixCache = None
    is_hybrid: bool = False

    # Batch configs
    model_config: ModelConfig = None
    forward_mode: ForwardMode = None
    enable_overlap: bool = False
    # Tell whether the current running batch is full so that we can skip
    # the check of whether to prefill new requests.
    # This is an optimization to reduce the overhead of the prefill check.
    batch_is_full: bool = False

    # Events
    launch_done: Optional[threading.Event] = None

    # For chunked prefill in PP
    chunked_req: Optional[Req] = None

    # Sampling info
    sampling_info: SamplingBatchInfo = None
    next_batch_sampling_info: SamplingBatchInfo = None

    # Batched arguments to model runner
    input_ids: torch.Tensor = None  # shape: [b], int64
    input_embeds: torch.Tensor = None  # shape: [b, hidden_size], float32
    token_type_ids: torch.Tensor = None  # shape: [b], int64
    req_pool_indices: torch.Tensor = None  # shape: [b], int64
    seq_lens: torch.Tensor = None  # shape: [b], int64
    # The output locations of the KV cache
    out_cache_loc: torch.Tensor = None  # shape: [b], int64
    output_ids: torch.Tensor = None  # shape: [b], int64

    # For multimodal inputs
    multimodal_inputs: Optional[List] = None

    # The sum of all sequence lengths
    seq_lens_sum: int = None

    # For DP attention
    global_num_tokens: Optional[List[int]] = None
    global_num_tokens_for_logprob: Optional[List[int]] = None
    is_extend_in_batch: bool = False
    can_run_dp_cuda_graph: bool = False
    tbo_split_seq_index: Optional[int] = None
    global_forward_mode: Optional[ForwardMode] = None

    # For processing logprobs
    return_logprob: bool = False
    top_logprobs_nums: Optional[List[int]] = None
    token_ids_logprobs: Optional[List[List[int]]] = None

    # For logits and logprob post processing
    temp_scaled_logprobs: bool = False
    top_p_normalized_logprobs: bool = False

    # For extend and mixed chunked prefill
    prefix_lens: List[int] = None
    extend_lens: List[int] = None
    extend_num_tokens: Optional[int] = None
    decoding_reqs: List[Req] = None
    extend_logprob_start_lens: List[int] = None
    # It is an empty list if logprob is not required.
    extend_input_logprob_token_ids: Optional[torch.Tensor] = None

    # For encoder-decoder architectures
    encoder_cached: Optional[List[bool]] = None
    encoder_lens: Optional[torch.Tensor] = None
    encoder_lens_cpu: Optional[List[int]] = None
    encoder_out_cache_loc: Optional[torch.Tensor] = None

    # Stream
    has_stream: bool = False

    # Has grammar
    has_grammar: bool = False

    # Device
    device: str = "cuda"

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]] = None

    # Enable custom logit processor
    enable_custom_logit_processor: bool = False

    # Whether to return hidden states
    return_hidden_states: bool = False

    # hicache pointer for synchronizing data loading from CPU to GPU
    hicache_consumer_index: int = 0

    @classmethod
    def init_new(
        cls,
        reqs: List[Req],
        req_to_token_pool: ReqToTokenPool,
        token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator,
        tree_cache: BasePrefixCache,
        model_config: ModelConfig,
        enable_overlap: bool,
        spec_algorithm: SpeculativeAlgorithm,
        enable_custom_logit_processor: bool,
        chunked_req: Optional[Req] = None,
    ):
        return_logprob = any(req.return_logprob for req in reqs)

        is_hybrid = False
        if isinstance(token_to_kv_pool_allocator, SWATokenToKVPoolAllocator):
            assert isinstance(tree_cache, SWARadixCache) or isinstance(
                tree_cache, SWAChunkCache
            ), "SWARadixCache or SWAChunkCache is required for SWATokenToKVPoolAllocator"
            is_hybrid = True

        return cls(
            reqs=reqs,
            req_to_token_pool=req_to_token_pool,
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
            tree_cache=tree_cache,
            is_hybrid=is_hybrid,
            model_config=model_config,
            enable_overlap=enable_overlap,
            return_logprob=return_logprob,
            has_stream=any(req.stream for req in reqs),
            has_grammar=any(req.grammar for req in reqs),
            device=req_to_token_pool.device,
            spec_algorithm=spec_algorithm,
            enable_custom_logit_processor=enable_custom_logit_processor,
            return_hidden_states=any(req.return_hidden_states for req in reqs),
            chunked_req=chunked_req,
        )

    def batch_size(self):
        return len(self.reqs)

    def is_empty(self):
        return len(self.reqs) == 0

    def alloc_req_slots(self, num_reqs: int):
        req_pool_indices = self.req_to_token_pool.alloc(num_reqs)
        if req_pool_indices is None:
            raise RuntimeError(
                "alloc_req_slots runs out of memory. "
                "Please set a smaller number for `--max-running-requests`. "
                f"{self.req_to_token_pool.available_size()=}, "
                f"{num_reqs=}, "
            )
        return req_pool_indices

    def alloc_token_slots(self, num_tokens: int, backup_state: bool = False):
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc(num_tokens)
        if out_cache_loc is None:
            phase_str = "Prefill" if self.forward_mode.is_extend() else "Decode"
            error_msg = (
                f"{phase_str} out of memory. Try to lower your batch size.\n"
                f"Try to allocate {num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            if self.tree_cache is not None:
                self.tree_cache.pretty_print()
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_extend(
        self,
        prefix_lens: torch.Tensor,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        extend_num_tokens: int,
        backup_state: bool = False,
    ):
        num_tokens = (
            extend_num_tokens
            + len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        )
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_extend(
            prefix_lens, seq_lens, last_loc, extend_num_tokens
        )
        if out_cache_loc is None:
            error_msg = (
                f"Prefill out of memory. Try to lower your batch size.\n"
                f"Try to allocate {extend_num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_decode(
        self,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        backup_state: bool = False,
    ):
        num_tokens = len(seq_lens) * self.token_to_kv_pool_allocator.page_size

        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_decode(seq_lens, last_loc)
        if out_cache_loc is None:
            error_msg = (
                f"Decode out of memory. Try to lower your batch size.\n"
                f"Try to allocate {len(seq_lens)} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def prepare_encoder_info_extend(self, input_ids: List[int], seq_lens: List[int]):
        self.encoder_lens_cpu = []
        self.encoder_cached = []

        for req in self.reqs:
            im = req.multimodal_inputs
            if im is None or im.num_image_tokens is None:
                # No image input
                self.encoder_lens_cpu.append(0)
                self.encoder_cached.append(True)
            else:
                self.encoder_lens_cpu.append(im.num_image_tokens)
                self.encoder_cached.append(
                    self.forward_mode.is_decode()
                    or len(req.prefix_indices) >= im.num_image_tokens
                )

        self.encoder_lens = torch.tensor(self.encoder_lens_cpu, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        # Strip encoder infos
        pt = 0
        decoder_out_cache_loc = []
        encoder_out_cache_loc = []
        for i, req in enumerate(self.reqs):
            encoder_len = self.encoder_lens_cpu[i]
            seq_lens[i] -= encoder_len

            if len(req.prefix_indices) < encoder_len:
                # NOTE: the encoder part should be considered as a whole
                assert len(req.prefix_indices) == 0
                input_ids[i] = input_ids[i][encoder_len:]
                encoder_out_cache_loc.append(self.out_cache_loc[pt : pt + encoder_len])
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt + encoder_len : pt + req.extend_input_len]
                )
                self.extend_lens[i] -= encoder_len
                self.extend_num_tokens -= encoder_len
            else:
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt : pt + req.extend_input_len]
                )
                self.prefix_lens[i] -= encoder_len

            pt += req.extend_input_len

        # Reassign
        self.input_ids = torch.tensor(sum(input_ids, []), dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        self.seq_lens = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if not decoder_out_cache_loc:
            self.out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.out_cache_loc = torch.cat(decoder_out_cache_loc)

        if not encoder_out_cache_loc:
            self.encoder_out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.encoder_out_cache_loc = torch.cat(encoder_out_cache_loc)

        assert (
            len(self.out_cache_loc) == self.extend_num_tokens
        ), f"Expected {self.extend_num_tokens} tokens, got {len(self.out_cache_loc)}"

    def prepare_for_extend(self):
        self.forward_mode = ForwardMode.EXTEND

        # Allocate req slots
        bs = len(self.reqs)
        req_pool_indices = self.alloc_req_slots(bs)

        # Init tensors
        reqs = self.reqs
        input_ids = [r.fill_ids[len(r.prefix_indices) :] for r in reqs]
        extend_num_tokens = sum(len(ids) for ids in input_ids)
        seq_lens = [len(r.fill_ids) for r in reqs]
        prefix_lens = [len(r.prefix_indices) for r in reqs]
        extend_lens = [r.extend_input_len for r in reqs]

        token_type_ids = [
            r.token_type_ids for r in reqs if r.token_type_ids is not None
        ]

        req_pool_indices_tensor = torch.tensor(req_pool_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        input_ids_tensor = torch.tensor(sum(input_ids, []), dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        seq_lens_tensor = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        prefix_lens_tensor = torch.tensor(
            prefix_lens, dtype=torch.int64, device=self.device
        )

        token_type_ids_tensor = None
        if len(token_type_ids) > 0:
            token_type_ids_tensor = torch.tensor(
                sum(token_type_ids, []), dtype=torch.int64
            ).to(self.device, non_blocking=True)

        extend_lens_tensor = seq_lens_tensor - prefix_lens_tensor

        # Copy the prefix and do some basic checks
        input_embeds = []
        extend_input_logprob_token_ids = []
        multimodal_inputs = []

        for i, (req, seq_len, pre_len) in enumerate(zip(reqs, seq_lens, prefix_lens)):
            req.req_pool_idx = req_pool_indices[i]
            assert seq_len - pre_len == req.extend_input_len

            if pre_len > 0:
                self.req_to_token_pool.write(
                    (req.req_pool_idx, slice(0, pre_len)), req.prefix_indices
                )
                if isinstance(self.tree_cache, SWAChunkCache):
                    self.tree_cache.evict_swa(
                        req, pre_len, self.model_config.attention_chunk_size
                    )

            # If input_embeds are available, store them
            if req.input_embeds is not None:
                # If req.input_embeds is already a list, append its content directly
                input_embeds.extend(req.input_embeds)  # Use extend to avoid nesting

            multimodal_inputs.append(req.multimodal_inputs)

            req.cached_tokens += pre_len - req.already_computed
            req.already_computed = seq_len
            req.is_retracted = False

            # Compute the relative logprob_start_len in an extend batch
            if req.logprob_start_len >= pre_len:
                req.extend_logprob_start_len = min(
                    req.logprob_start_len - pre_len,
                    req.extend_input_len,
                    req.seqlen - 1,
                )
            else:
                req.extend_logprob_start_len = 0

            if self.return_logprob:
                # Find input logprob token ids.
                # First, find a global index within origin_input_ids and slide it by 1
                # to compute input logprobs. It is because you need the next token
                # to compute input logprobs. E.g., (chunk size 2)
                #
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [1, 2]
                # extend_input_logprob_token_id = [2, 3]
                #
                # Note that it can also overflow. In this case, we pad it with 0.
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [3, 4]
                # extend_input_logprob_token_id = [4, 0]
                global_start_idx, global_end_idx = (
                    len(req.prefix_indices),
                    len(req.fill_ids),
                )
                # Apply logprob_start_len
                if global_start_idx < req.logprob_start_len:
                    global_start_idx = req.logprob_start_len

                logprob_token_ids = req.origin_input_ids[
                    global_start_idx + 1 : global_end_idx + 1
                ]
                extend_input_logprob_token_ids.extend(logprob_token_ids)

                # We need req.extend_input_len - req.extend_logprob_start_len tokens in total,
                # and logprob_token_ids only covers the input-logprob part, so pad the rest with 0.
                extend_input_logprob_token_ids.extend(
                    [0]
                    * (
                        req.extend_input_len
                        - req.extend_logprob_start_len
                        - len(logprob_token_ids)
                    )
                )

        if self.return_logprob:
            extend_input_logprob_token_ids = torch.tensor(
                extend_input_logprob_token_ids
            )
        else:
            extend_input_logprob_token_ids = None

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            out_cache_loc = self.alloc_token_slots(extend_num_tokens)
        else:
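            # Note: with page_size > 1, allocation must respect page boundaries. last_loc holds
            # the KV slot of each request's last prefix token (or -1 if there is no prefix), so
            # the paged allocator can keep filling a partially used page before taking new pages.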
            last_loc = get_last_loc(
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
            )
            out_cache_loc = self.alloc_paged_token_slots_extend(
                prefix_lens_tensor, seq_lens_tensor, last_loc, extend_num_tokens
            )

        # Set fields
        self.input_ids = input_ids_tensor
        self.req_pool_indices = req_pool_indices_tensor
        self.seq_lens = seq_lens_tensor
        self.out_cache_loc = out_cache_loc
        self.input_embeds = (
            torch.tensor(input_embeds).to(self.device, non_blocking=True)
            if input_embeds
            else None
        )
        for mm_input in multimodal_inputs:
            if mm_input is None:
                continue
            for mm_item in mm_input.mm_items:
                pixel_values = getattr(mm_item, "feature", None)
                if isinstance(pixel_values, torch.Tensor):
                    mm_item.feature = pixel_values.to(self.device, non_blocking=True)
        self.multimodal_inputs = multimodal_inputs
        self.token_type_ids = token_type_ids_tensor
        self.seq_lens_sum = sum(seq_lens)

        if self.return_logprob:
            self.top_logprobs_nums = [r.top_logprobs_num for r in reqs]
            self.token_ids_logprobs = [r.token_ids_logprob for r in reqs]

        self.extend_logprob_start_lens = [r.extend_logprob_start_len for r in reqs]
        self.extend_num_tokens = extend_num_tokens
        self.prefix_lens = prefix_lens
        self.extend_lens = extend_lens
        self.extend_input_logprob_token_ids = extend_input_logprob_token_ids

        # Write to req_to_token_pool
        if support_triton(global_server_args_dict.get("attention_backend")):
            # TODO: some tensors can be reused for ForwardBatchInfo (e.g., extend_lens, cumsum_start)

            write_req_to_token_pool_triton[(bs,)](
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
                seq_lens_tensor,
                extend_lens_tensor,
                out_cache_loc,
                self.req_to_token_pool.req_to_token.shape[1],
            )
        else:
            pt = 0
            for i in range(bs):
                self.req_to_token_pool.write(
                    (req_pool_indices[i], slice(prefix_lens[i], seq_lens[i])),
                    out_cache_loc[pt : pt + extend_lens[i]],
                )
                pt += extend_lens[i]

        if self.model_config.is_encoder_decoder:
            self.prepare_encoder_info_extend(input_ids, seq_lens)

        # Build sampling info
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_split_prefill(self):
        self.prepare_for_extend()
        # For split prefill, we need to set the forward mode to SPLIT_PREFILL
        self.forward_mode = ForwardMode.SPLIT_PREFILL

    def mix_with_running(self, running_batch: "ScheduleBatch"):
        self.forward_mode = ForwardMode.MIXED
        running_bs = running_batch.batch_size()

        for req in running_batch.reqs:
            req.fill_ids = req.origin_input_ids + req.output_ids
            req.extend_input_len = 1

        input_ids = torch.cat([self.input_ids, running_batch.input_ids])
        out_cache_loc = torch.cat([self.out_cache_loc, running_batch.out_cache_loc])

        self.merge_batch(running_batch)
        self.input_ids = input_ids
        self.out_cache_loc = out_cache_loc

        # For the overlap scheduler, output_ids has a one-step delay
        delta = 0 if self.enable_overlap else -1

        # NOTE: prefix_indices is what has been cached, but we don't cache each decode step
        self.prefix_lens.extend(
            [
                len(r.origin_input_ids) + len(r.output_ids) + delta
                for r in running_batch.reqs
            ]
        )
        self.extend_lens.extend([1] * running_bs)
        self.extend_num_tokens += running_bs
        # TODO (lianmin): Revisit this. It should be seq_len - 1
        self.extend_logprob_start_lens.extend([0] * running_bs)

    def new_page_count_next_decode(self):
        page_size = self.token_to_kv_pool_allocator.page_size
        if page_size == 1:
            return len(self.reqs)
        # In the decoding phase, the length of a request's KV cache should be
        # the total length of the request minus 1
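        # Roughly: a request needs a fresh page for its next decode token only when its
        # current KV length is an exact multiple of the page size (e.g., with page_size=4,
        # a KV length of 8 sits on a page boundary, so one new page is required).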
        return (
            sum(1 for req in self.reqs if req.seqlen % page_size == 0)
            if self.enable_overlap
            else sum(1 for req in self.reqs if (req.seqlen - 1) % page_size == 0)
        )

    def check_decode_mem(self, buf_multiplier=1):
        num_tokens = (
            self.new_page_count_next_decode()
            * buf_multiplier
            * self.token_to_kv_pool_allocator.page_size
        )

        self._evict_tree_cache_if_needed(num_tokens)
        return self._is_available_size_sufficient(num_tokens)

    def retract_decode(self, server_args: ServerArgs):
        """Retract the decoding requests when there is not enough memory."""
        sorted_indices = list(range(len(self.reqs)))

        # TODO(lsyin): improve retraction policy for radix cache
        # For spec decoding, filter_batch API can only filter
        # requests from the back, so we can only retract from the back.
        # TODO(sang): Clean up finish path and support better retract
        # policy.
        if not server_args.speculative_algorithm:
            sorted_indices.sort(
                key=lambda i: (
                    len(self.reqs[i].output_ids),
                    -len(self.reqs[i].origin_input_ids),
                ),
                reverse=True,
            )
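        # With this ordering, pop() retracts the request with the fewest decoded tokens first
        # (ties broken by the longest prompt), so requests that have made the most decoding
        # progress are kept the longest.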

        def get_required_tokens(num_reqs: int):
            headroom_for_spec_decode = 0
            if server_args.speculative_algorithm:
                headroom_for_spec_decode += (
                    num_reqs
                    * server_args.speculative_eagle_topk
                    * server_args.speculative_num_steps
                    + num_reqs * server_args.speculative_num_draft_tokens
                )
            return (
                num_reqs * global_config.retract_decode_steps + headroom_for_spec_decode
            )

        def _get_available_size():
            if self.is_hybrid:
                return min(
                    self.token_to_kv_pool_allocator.full_available_size(),
                    self.token_to_kv_pool_allocator.swa_available_size(),
                )
            else:
                return self.token_to_kv_pool_allocator.available_size()

        retracted_reqs = []
        seq_lens_cpu = self.seq_lens.cpu().numpy()
        first_iter = True
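        # Keep retracting until the allocator can cover `retract_decode_steps` future decode
        # tokens for every remaining request (plus headroom for speculative drafting, if enabled).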
        while (
            _get_available_size() < get_required_tokens(len(sorted_indices))
            or first_iter
        ):
            if len(sorted_indices) == 1:
                # Corner case: only one request left
                if self.is_hybrid:
                    full_available_size = (
                        self.token_to_kv_pool_allocator.full_available_size()
                    )
                    swa_available_size = (
                        self.token_to_kv_pool_allocator.swa_available_size()
                    )
                    assert (
                        full_available_size > 0 and swa_available_size > 0
                    ), f"No space left for only one request in SWA mode {full_available_size=}, {swa_available_size=}"
                else:
                    assert (
                        self.token_to_kv_pool_allocator.available_size() > 0
                    ), f"No space left for only one request, {self.token_to_kv_pool_allocator.available_size()=}"
                break

            first_iter = False
            idx = sorted_indices.pop()
            req = self.reqs[idx]
            retracted_reqs.append(req)

            if server_args.disaggregation_mode == "decode":
                req.offload_kv_cache(
                    self.req_to_token_pool, self.token_to_kv_pool_allocator
                )

            if isinstance(self.tree_cache, ChunkCache):
                # ChunkCache does not have eviction
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)
            else:
                # TODO: apply more fine-grained retraction
                last_uncached_pos = (
                    len(req.prefix_indices) // server_args.page_size
                ) * server_args.page_size
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, last_uncached_pos : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)

                # release the last node
                if self.is_hybrid:
                    self.tree_cache.dec_lock_ref(req.last_node, req.swa_uuid_for_lock)
                else:
                    self.tree_cache.dec_lock_ref(req.last_node)

                # NOTE(lsyin): we should use the newly evictable memory instantly.
                num_tokens = len(sorted_indices) * global_config.retract_decode_steps
                self._evict_tree_cache_if_needed(num_tokens)

            req.reset_for_retract()

        if len(retracted_reqs) == 0:
            # Corner case: only one request left
            raise ValueError(
                "Failed to retract any request. No space left for only one request."
            )

        self.filter_batch(keep_indices=sorted_indices)

        # Reqs in batch are filtered
        total_decoded_tokens = sum(len(r.output_ids) for r in self.reqs)
        total_max_new_tokens = sum(r.sampling_params.max_new_tokens for r in self.reqs)

        new_estimate_ratio = (
            total_decoded_tokens + global_config.retract_decode_steps * len(self.reqs)
        ) / total_max_new_tokens
        new_estimate_ratio = min(1.0, new_estimate_ratio)

        return retracted_reqs, new_estimate_ratio

    def prepare_encoder_info_decode(self):
        # Reset the encoder cached status
        self.encoder_cached = [True] * len(self.reqs)

    def prepare_for_idle(self):
        self.forward_mode = ForwardMode.IDLE
        self.input_ids = torch.empty(0, dtype=torch.int64, device=self.device)
        self.seq_lens = torch.empty(0, dtype=torch.int64, device=self.device)
        self.out_cache_loc = torch.empty(0, dtype=torch.int64, device=self.device)
        self.req_pool_indices = torch.empty(0, dtype=torch.int32, device=self.device)
        self.seq_lens_sum = 0
        self.extend_num_tokens = 0
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_decode(self):
        self.forward_mode = ForwardMode.DECODE
        bs = len(self.reqs)

        if self.spec_algorithm.is_eagle():
            # If spec decoding is used, the decode batch is prepared inside
            # `forward_batch_speculative_generation` after running draft models.
            return

        if self.sampling_info.penalizer_orchestrator.is_required:
            if self.enable_overlap:
                # TODO: this can be slow, optimize this.
                delayed_output_ids = torch.tensor(
                    [
                        (
                            req.output_ids[-1]
                            if len(req.output_ids)
                            else req.origin_input_ids[-1]
                        )
                        for req in self.reqs
                    ],
                    dtype=torch.int64,
                    device=self.device,
                )
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    delayed_output_ids
                )
            else:
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    self.output_ids.to(torch.int64)
                )

        # Update fields
        self.input_ids = self.output_ids
        self.output_ids = None

        if self.model_config.is_encoder_decoder:
            locs = self.encoder_lens + self.seq_lens
            self.prepare_encoder_info_decode()
        else:
            locs = self.seq_lens.clone()

        if self.enable_overlap:
            # Do not use in-place operations in the overlap mode
            self.seq_lens = self.seq_lens + 1
        else:
            # A faster in-place version
            self.seq_lens.add_(1)
        self.seq_lens_sum += bs

        # free memory
        if isinstance(self.tree_cache, SWAChunkCache):
            for req in self.reqs:
                self.tree_cache.evict_swa(
                    req, req.seqlen - 1, self.model_config.attention_chunk_size
                )

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            self.out_cache_loc = self.alloc_token_slots(bs)
        else:
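            # seq_lens has already been incremented above, so the last existing token of each
            # request is recorded at index seq_lens - 2 of its row in req_to_token.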
            last_loc = self.req_to_token_pool.req_to_token[
                self.req_pool_indices, self.seq_lens - 2
            ]
            self.out_cache_loc = self.alloc_paged_token_slots_decode(
                self.seq_lens, last_loc
            )

        self.req_to_token_pool.write(
            (self.req_pool_indices, locs), self.out_cache_loc.to(torch.int32)
        )

    def filter_batch(
        self,
        chunked_req_to_exclude: Optional[Union[Req, List[Req]]] = None,
        keep_indices: Optional[List[int]] = None,
    ):
        if keep_indices is None:
            if isinstance(chunked_req_to_exclude, Req):
                chunked_req_to_exclude = [chunked_req_to_exclude]
            elif chunked_req_to_exclude is None:
                chunked_req_to_exclude = []
            keep_indices = [
                i
                for i in range(len(self.reqs))
                if not self.reqs[i].finished()
                and self.reqs[i] not in chunked_req_to_exclude
            ]

        if keep_indices is None or len(keep_indices) == 0:
            # Filter out all requests
            self.reqs = []
            return

        if len(keep_indices) == len(self.reqs):
            # No need to filter
            return

        keep_indices_device = torch.tensor(keep_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if self.model_config.is_encoder_decoder:
            self.encoder_lens = self.encoder_lens[keep_indices_device]
            self.encoder_lens_cpu = [self.encoder_lens_cpu[i] for i in keep_indices]

        self.reqs = [self.reqs[i] for i in keep_indices]
        if self.multimodal_inputs is not None:
            self.multimodal_inputs = [self.multimodal_inputs[i] for i in keep_indices]
        self.req_pool_indices = self.req_pool_indices[keep_indices_device]
        self.seq_lens = self.seq_lens[keep_indices_device]
        self.out_cache_loc = None
        self.seq_lens_sum = self.seq_lens.sum().item()
        self.output_ids = self.output_ids[keep_indices_device]
        self.return_logprob = any(req.return_logprob for req in self.reqs)
        if self.return_logprob:
            self.top_logprobs_nums = [self.top_logprobs_nums[i] for i in keep_indices]
            self.token_ids_logprobs = [self.token_ids_logprobs[i] for i in keep_indices]
        else:
            self.top_logprobs_nums = None
            self.token_ids_logprobs = None

        self.has_stream = any(req.stream for req in self.reqs)
        self.has_grammar = any(req.grammar for req in self.reqs)

        self.sampling_info.filter_batch(keep_indices, keep_indices_device)
        if self.spec_info:
            self.spec_info.filter_batch(keep_indices_device)

    def merge_batch(self, other: "ScheduleBatch"):
        # The penalizer orchestrator must be merged before Batch.reqs is merged, because
        # orchestrator.merge() depends on Batch.reqs while preparing each penalizer, so it
        # needs to be called with the pre-merged Batch.reqs.
        self.sampling_info.merge_batch(other.sampling_info)

        # Encoder-decoder infos
        if self.model_config.is_encoder_decoder:
            self.encoder_lens = torch.cat([self.encoder_lens, other.encoder_lens])
            self.encoder_lens_cpu.extend(other.encoder_lens_cpu)
        self.req_pool_indices = torch.cat(
            [self.req_pool_indices, other.req_pool_indices]
        )
        self.seq_lens = torch.cat([self.seq_lens, other.seq_lens])
        self.out_cache_loc = None
        self.seq_lens_sum += other.seq_lens_sum
        if self.output_ids is not None:
            self.output_ids = torch.cat([self.output_ids, other.output_ids])
        if self.return_logprob and other.return_logprob:
            self.top_logprobs_nums.extend(other.top_logprobs_nums)
            self.token_ids_logprobs.extend(other.token_ids_logprobs)
        elif self.return_logprob:
            self.top_logprobs_nums.extend([0] * len(other.reqs))
            self.token_ids_logprobs.extend([None] * len(other.reqs))
        elif other.return_logprob:
            self.top_logprobs_nums = [0] * len(self.reqs) + other.top_logprobs_nums
            self.token_ids_logprobs = [None] * len(self.reqs) + other.token_ids_logprobs
        self.reqs.extend(other.reqs)
        if self.multimodal_inputs is not None:
            self.multimodal_inputs.extend(other.multimodal_inputs)

        self.return_logprob |= other.return_logprob
        self.has_stream |= other.has_stream
        self.has_grammar |= other.has_grammar
        self.return_hidden_states |= other.return_hidden_states

        if self.spec_info:
            self.spec_info.merge_batch(other.spec_info)

    def get_model_worker_batch(
        self, seq_lens_cpu_cache: Optional[torch.Tensor] = None
    ) -> ModelWorkerBatch:
        if self.forward_mode.is_decode_or_idle():
            extend_seq_lens = extend_prefix_lens = extend_logprob_start_lens = None
        else:
            extend_seq_lens = self.extend_lens
            extend_prefix_lens = self.prefix_lens
            extend_logprob_start_lens = self.extend_logprob_start_lens

        if self.forward_mode.is_decode_or_idle():
            attention_backend_str = global_server_args_dict["decode_attention_backend"]
        else:
            attention_backend_str = global_server_args_dict["prefill_attention_backend"]
        # Create seq_lens_cpu when needed
        if (
            attention_backend_str == "fa3"
            or (
                global_server_args_dict["use_mla_backend"]
                and attention_backend_str == "flashinfer"
            )
            or attention_backend_str == "flashmla"
            or attention_backend_str == "cutlass_mla"
            or attention_backend_str == "ascend"
            or global_server_args_dict["enable_two_batch_overlap"]
        ):
            seq_lens_cpu = (
                seq_lens_cpu_cache
                if seq_lens_cpu_cache is not None
                else self.seq_lens.cpu()
            )
        else:
            seq_lens_cpu = None

        if self.sampling_info:
            if self.has_grammar:
                self.sampling_info.grammars = [req.grammar for req in self.reqs]
            else:
                self.sampling_info.grammars = None

        global bid
        bid += 1
        return ModelWorkerBatch(
            bid=bid,
            forward_mode=self.forward_mode,
            input_ids=self.input_ids,
            req_pool_indices=self.req_pool_indices,
            seq_lens=self.seq_lens,
            out_cache_loc=self.out_cache_loc,
            seq_lens_cpu=seq_lens_cpu,
            seq_lens_sum=self.seq_lens_sum,
            return_logprob=self.return_logprob,
            top_logprobs_nums=self.top_logprobs_nums,
            token_ids_logprobs=self.token_ids_logprobs,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            is_extend_in_batch=self.is_extend_in_batch,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            tbo_split_seq_index=self.tbo_split_seq_index,
            global_forward_mode=self.global_forward_mode,
            extend_num_tokens=self.extend_num_tokens,
            extend_seq_lens=extend_seq_lens,
            extend_prefix_lens=extend_prefix_lens,
            extend_logprob_start_lens=extend_logprob_start_lens,
            multimodal_inputs=self.multimodal_inputs,
            encoder_cached=self.encoder_cached,
            encoder_lens=self.encoder_lens,
            encoder_lens_cpu=self.encoder_lens_cpu,
            encoder_out_cache_loc=self.encoder_out_cache_loc,
            lora_ids=[req.lora_id for req in self.reqs],
            sampling_info=self.sampling_info,
            input_embeds=self.input_embeds,
            token_type_ids=self.token_type_ids,
            spec_algorithm=self.spec_algorithm,
            spec_info=self.spec_info,
            hicache_consumer_index=self.hicache_consumer_index,
            capture_hidden_mode=(
                CaptureHiddenMode.FULL
                if self.return_hidden_states
                else (
                    getattr(
                        self.spec_info, "capture_hidden_mode", CaptureHiddenMode.NULL
                    )
                    if self.spec_info
                    else CaptureHiddenMode.NULL
                )
            ),
            extend_input_logprob_token_ids=self.extend_input_logprob_token_ids,
            launch_done=self.launch_done,
        )

    def copy(self):
        # Only contains the fields that will be used by process_batch_result
        return ScheduleBatch(
            reqs=self.reqs,
            model_config=self.model_config,
            forward_mode=self.forward_mode,
            out_cache_loc=self.out_cache_loc,
            return_logprob=self.return_logprob,
            decoding_reqs=self.decoding_reqs,
            spec_algorithm=self.spec_algorithm,
            enable_custom_logit_processor=self.enable_custom_logit_processor,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            is_extend_in_batch=self.is_extend_in_batch,
        )

    def _evict_tree_cache_if_needed(
        self,
        num_tokens: int,
    ) -> None:
        if isinstance(self.tree_cache, SWAChunkCache):
            return

        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()

            if full_available_size < num_tokens or swa_available_size < num_tokens:
                if self.tree_cache is not None:
                    full_num_tokens = max(0, num_tokens - full_available_size)
                    swa_num_tokens = max(0, num_tokens - swa_available_size)
                    self.tree_cache.evict(full_num_tokens, swa_num_tokens)
        else:
            if self.token_to_kv_pool_allocator.available_size() < num_tokens:
                if self.tree_cache is not None:
                    self.tree_cache.evict(num_tokens)

    def _is_available_size_sufficient(self, num_tokens: int) -> bool:
        if self.is_hybrid:
            return (
                self.token_to_kv_pool_allocator.full_available_size() >= num_tokens
                and self.token_to_kv_pool_allocator.swa_available_size() >= num_tokens
            )
        else:
            return self.token_to_kv_pool_allocator.available_size() >= num_tokens

    def _available_and_evictable_str(self) -> str:
        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()
            full_evictable_size = self.tree_cache.full_evictable_size()
            swa_evictable_size = self.tree_cache.swa_evictable_size()
            return (
                f"Available full tokens: {full_available_size + full_evictable_size} ({full_available_size=} + {full_evictable_size=})\n"
                f"Available swa tokens: {swa_available_size + swa_evictable_size} ({swa_available_size=} + {swa_evictable_size=})\n"
                f"Full LRU list evictable size: {self.tree_cache.full_lru_list_evictable_size()}\n"
                f"SWA LRU list evictable size: {self.tree_cache.swa_lru_list_evictable_size()}\n"
            )
        else:
            available_size = self.token_to_kv_pool_allocator.available_size()
            evictable_size = self.tree_cache.evictable_size()
            return f"Available tokens: {available_size + evictable_size} ({available_size=} + {evictable_size=})\n"

    def __str__(self):
        return (
            f"ScheduleBatch(forward_mode={self.forward_mode.name if self.forward_mode else 'None'}, "
            f"#req={(len(self.reqs))})"
        )


@dataclasses.dataclass
class ModelWorkerBatch:
    # The batch id
    bid: int
    # The forward mode
    forward_mode: ForwardMode
    # The input ids
    input_ids: torch.Tensor
    # The indices of requests in the req_to_token_pool
    req_pool_indices: torch.Tensor
    # The sequence length
    seq_lens: torch.Tensor
    # The indices of output tokens in the token_to_kv_pool_allocator
    out_cache_loc: torch.Tensor
    # The sequence length tensor on CPU
    seq_lens_cpu: Optional[torch.Tensor]
    seq_lens_sum: int

    # For logprob
    return_logprob: bool
    top_logprobs_nums: Optional[List[int]]
    token_ids_logprobs: Optional[List[List[int]]]

    # For DP attention
    global_num_tokens: Optional[List[int]]
    global_num_tokens_for_logprob: Optional[List[int]]
    is_extend_in_batch: bool
    can_run_dp_cuda_graph: bool
    tbo_split_seq_index: Optional[int]
    global_forward_mode: Optional[ForwardMode]

    # For extend
    extend_num_tokens: Optional[int]
    extend_seq_lens: Optional[List[int]]
    extend_prefix_lens: Optional[List[int]]
    extend_logprob_start_lens: Optional[List[int]]
    extend_input_logprob_token_ids: Optional[torch.Tensor]

    # For multimodal
    multimodal_inputs: Optional[List[MultimodalInputs]]

    # For encoder-decoder
    encoder_cached: Optional[List[bool]]
    encoder_lens: Optional[torch.Tensor]
    encoder_lens_cpu: Optional[List[int]]
    encoder_out_cache_loc: Optional[torch.Tensor]

    # For LoRA
    lora_ids: Optional[List[str]]

    # Sampling info
    sampling_info: SamplingBatchInfo

    # The input embeds
    input_embeds: Optional[torch.Tensor] = None

    # For cross-encoder models
    token_type_ids: Optional[torch.Tensor] = None

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleVerifyInput, EagleDraftInput]] = None
    # If set, the output of the batch contains the hidden states of the run.
    capture_hidden_mode: CaptureHiddenMode = None
    hicache_consumer_index: int = 0

    # Overlap event
    launch_done: Optional[threading.Event] = None


@triton.jit
def write_req_to_token_pool_triton(
    req_to_token_ptr,  # [max_batch, max_context_len]
    req_pool_indices,
    pre_lens,
    seq_lens,
    extend_lens,
    out_cache_loc,
    req_to_token_ptr_stride: tl.constexpr,
):
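    # Each program instance handles one request: it copies that request's newly allocated
    # KV slot indices from out_cache_loc into its row of req_to_token, starting at offset
    # pre_len (the length of the cached prefix).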
    BLOCK_SIZE: tl.constexpr = 512
    pid = tl.program_id(0)

    req_pool_index = tl.load(req_pool_indices + pid)
    pre_len = tl.load(pre_lens + pid)
    seq_len = tl.load(seq_lens + pid)

    # NOTE: This can be slow for large bs
    cumsum_start = tl.cast(0, tl.int64)
    for i in range(pid):
        cumsum_start += tl.load(extend_lens + i)

    num_loop = tl.cdiv(seq_len - pre_len, BLOCK_SIZE)
    for i in range(num_loop):
        offset = tl.arange(0, BLOCK_SIZE) + i * BLOCK_SIZE
        mask = offset < (seq_len - pre_len)
        value = tl.load(out_cache_loc + cumsum_start + offset, mask=mask)
        tl.store(
            req_to_token_ptr
            + req_pool_index * req_to_token_ptr_stride
            + offset
            + pre_len,
            value,
            mask=mask,
        )


def get_last_loc(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    if (
        global_server_args_dict["attention_backend"] != "ascend"
        and global_server_args_dict["attention_backend"] != "torch_native"
    ):
        impl = get_last_loc_triton
    else:
        impl = get_last_loc_torch

    return impl(req_to_token, req_pool_indices_tensor, prefix_lens_tensor)


def get_last_loc_torch(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
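    # For each request, return the KV-cache location of its last prefix token,
    # or -1 when the request has no cached prefix.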
    return torch.where(
        prefix_lens_tensor > 0,
        req_to_token[req_pool_indices_tensor, prefix_lens_tensor - 1],
        torch.full_like(prefix_lens_tensor, -1),
    )


@triton.jit
def get_last_loc_kernel(
    req_to_token,
    req_pool_indices_tensor,
    prefix_lens_tensor,
    result,
    num_tokens,
    req_to_token_stride,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(0)
    offset = tl.arange(0, BLOCK_SIZE) + pid * BLOCK_SIZE
    mask = offset < num_tokens

    prefix_lens = tl.load(prefix_lens_tensor + offset, mask=mask, other=0)
    req_pool_indices = tl.load(req_pool_indices_tensor + offset, mask=mask, other=0)

    token_mask = prefix_lens > 0
    token_index = req_pool_indices * req_to_token_stride + (prefix_lens - 1)
    tokens = tl.load(req_to_token + token_index, mask=token_mask, other=-1)

    tl.store(result + offset, tokens, mask=mask)


def get_last_loc_triton(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    BLOCK_SIZE = 256
    num_tokens = prefix_lens_tensor.shape[0]
    result = torch.empty_like(prefix_lens_tensor)
    grid = (triton.cdiv(num_tokens, BLOCK_SIZE),)
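    # One program per block of up to BLOCK_SIZE requests; each program gathers the last
    # prefix locations for its block in parallel.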

    get_last_loc_kernel[grid](
        req_to_token,
        req_pool_indices_tensor,
        prefix_lens_tensor,
        result,
        num_tokens,
        req_to_token.stride(0),
        BLOCK_SIZE,
    )
    return result