from __future__ import annotations

# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Store information about requests and batches.

The following is the flow of data structures for a batch:

ScheduleBatch -> ModelWorkerBatch -> ForwardBatch

- ScheduleBatch is managed by `scheduler.py::Scheduler`.
  It contains high-level scheduling data. Most of the data is on the CPU.
- ModelWorkerBatch is managed by `tp_worker.py::TpModelWorker`.
  It is a subset of `ScheduleBatch` that only contains data related to the model forward on GPU.
  It is passed from the CPU scheduler to the GPU model runner.
- ForwardBatch is managed by `model_runner.py::ModelRunner`.
  It contains low-level tensor data. Most of the data consists of GPU tensors.

TODO(lmzheng): ModelWorkerBatch seems a bit redundant and we may consider removing it in the future.
"""

import copy
import dataclasses
import logging
import threading
from enum import Enum, auto
from http import HTTPStatus
from itertools import chain
from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple, Union

import numpy as np
import torch
import triton
import triton.language as tl

from sglang.global_config import global_config
from sglang.srt.constrained.base_grammar_backend import BaseGrammarObject
from sglang.srt.disaggregation.base import BaseKVSender
from sglang.srt.disaggregation.decode_schedule_batch_mixin import (
    ScheduleBatchDisaggregationDecodeMixin,
)
from sglang.srt.distributed.parallel_state import get_tensor_model_parallel_rank
from sglang.srt.mem_cache.allocator import (
    BaseTokenToKVPoolAllocator,
    SWATokenToKVPoolAllocator,
)
from sglang.srt.mem_cache.base_prefix_cache import BasePrefixCache
from sglang.srt.mem_cache.chunk_cache import ChunkCache, SWAChunkCache
from sglang.srt.mem_cache.lora_radix_cache import LoRAKey, LoRARadixCache
from sglang.srt.mem_cache.memory_pool import ReqToTokenPool
from sglang.srt.mem_cache.swa_radix_cache import SWARadixCache
from sglang.srt.metrics.collector import TimeStats
from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode, ForwardMode
from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
from sglang.srt.sampling.sampling_params import SamplingParams
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import flatten_nested_list, support_triton

if TYPE_CHECKING:
    from sglang.srt.configs.model_config import ModelConfig
    from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
    from sglang.srt.speculative.spec_info import SpeculativeAlgorithm

INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5

GLOBAL_SERVER_ARGS_KEYS = [
    "attention_backend",
    "mm_attention_backend",
    "debug_tensor_dump_inject",
    "debug_tensor_dump_output_folder",
    "chunked_prefill_size",
    "device",
    "disable_chunked_prefix_cache",
    "disable_radix_cache",
    "enable_dp_attention",
    "enable_two_batch_overlap",
    "tbo_token_distribution_threshold",
    "enable_dp_lm_head",
    "moe_a2a_backend",
    "deepep_mode",
    "enable_flashinfer_cutlass_moe",
    "enable_flashinfer_trtllm_moe",
    "enable_flashinfer_allreduce_fusion",
    "moe_dense_tp_size",
    "ep_dispatch_algorithm",
    "deepep_config",
    "ep_num_redundant_experts",
    "enable_nan_detection",
    "flashinfer_mla_disable_ragged",
    "max_micro_batch_size",
    "disable_shared_experts_fusion",
    "sampling_backend",
    "speculative_accept_threshold_single",
    "speculative_accept_threshold_acc",
    "torchao_config",
    "triton_attention_reduce_in_fp32",
    "num_reserved_decode_tokens",
    "weight_loader_disable_mmap",
    "enable_triton_kernel_moe",
    "enable_flashinfer_mxfp4_moe",
    "enable_multimodal",
    "enable_symm_mem",
    "quantization",
]

# Put some global args for easy access
global_server_args_dict = {k: getattr(ServerArgs, k) for k in GLOBAL_SERVER_ARGS_KEYS}
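# Example read (illustrative): global_server_args_dict["attention_backend"]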

logger = logging.getLogger(__name__)


class BaseFinishReason:
    def __init__(self, is_error: bool = False):
        self.is_error = is_error

    def to_json(self):
        raise NotImplementedError()


class FINISH_MATCHED_TOKEN(BaseFinishReason):
    def __init__(self, matched: Union[int, List[int]]):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_MATCHED_STR(BaseFinishReason):
    def __init__(self, matched: str):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_LENGTH(BaseFinishReason):
    def __init__(self, length: int):
        super().__init__()
        self.length = length

    def to_json(self):
        return {
            "type": "length",  # to match OpenAI API's return value
            "length": self.length,
        }


class FINISH_ABORT(BaseFinishReason):
    def __init__(self, message=None, status_code=None, err_type=None):
        super().__init__(is_error=True)
        self.message = message or "Aborted"
        self.status_code = status_code
        self.err_type = err_type

    def to_json(self):
        return {
            "type": "abort",
            "message": self.message,
            "status_code": self.status_code,
            "err_type": self.err_type,
        }


class Modality(Enum):
    IMAGE = auto()
    MULTI_IMAGES = auto()
    VIDEO = auto()
    AUDIO = auto()

    @staticmethod
    def from_str(modality_str: str):
        try:
            return Modality[modality_str.upper()]
        except KeyError:
            raise ValueError(
                f"Invalid modality string: {modality_str}. Valid modalities are: {[m.name for m in Modality]}"
            )
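        # Example (illustrative): Modality.from_str("video") -> Modality.VIDEO;
        # an unrecognized string raises ValueError.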

    @staticmethod
    def all():
        return [Modality.IMAGE, Modality.VIDEO, Modality.AUDIO]


@dataclasses.dataclass
class MultimodalDataItem:
    """
    One MultimodalDataItem contains all inputs for one modality.
    For example, if there are 3 images and 1 audio input, there will be 2 MultimodalDataItems:
    One for images and one for audio.

    We put the common fields first and the model-specific fields in model_specific_data.
    """

    modality: Modality
    hash: int = None
    pad_value: int = None
    offsets: Optional[list] = None

    # the raw features returned by processor, e.g. pixel_values or audio_features
    feature: Union[torch.Tensor, np.ndarray] = None
    # the precomputed embeddings, passed as final encoder embeddings
    # Exactly one of feature and precomputed_embeddings will be set; the other stays empty
    precomputed_embeddings: Optional[Union[torch.Tensor, np.ndarray]] = None

    # Model-specific data stored in a dictionary
    model_specific_data: dict[str, Any] = dataclasses.field(default_factory=dict)

    def __getattr__(self, name: str):
        if (
            "model_specific_data" in self.__dict__
            and name in self.__dict__["model_specific_data"]
        ):
            return self.__dict__["model_specific_data"][name]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )

    def __setitem__(self, key: str, value: Any):
        if key in self.__dict__:
            self.__dict__[key] = value
        else:
            self.model_specific_data[key] = value

    def set(self, key: str, value: Any):
        self.__setitem__(key, value)
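        # Note (illustrative): keys that are not declared fields fall through to
        # model_specific_data, e.g. item.set("image_grid_thw", value) stores it there
        # and item.image_grid_thw reads it back through __getattr__.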

    @staticmethod
    def is_empty_list(l):
        if l is None:
            return True
        return len([item for item in flatten_nested_list(l) if item is not None]) == 0

    def set_pad_value(self):
        """
        Set the pad value after first hashing the data
        """
        from sglang.srt.managers.mm_utils import hash_feature

        if self.hash is None:
            if self.feature is not None:
                hashed_feature = self.feature
            else:
                hashed_feature = self.precomputed_embeddings
            self.hash = hash_feature(hashed_feature)
        assert self.hash is not None
        self.pad_value = self.hash % (1 << 30)

    def is_modality(self, modality: Modality) -> bool:
        return self.modality == modality

    def is_audio(self):
        return self.modality == Modality.AUDIO

    def is_image(self):
        return self.modality in [Modality.IMAGE, Modality.MULTI_IMAGES]

    def is_video(self):
        return self.modality == Modality.VIDEO

    def is_valid(self) -> bool:
        return self.is_image() or self.is_video() or self.is_audio()

    def validate(self):
        ...
        # TODO

    @staticmethod
    def from_dict(obj: dict):
        kwargs = dict(obj)
        modality = kwargs.pop("modality")
        if isinstance(modality, str):
            modality = Modality[modality]
        ret = MultimodalDataItem(modality=modality, **kwargs)
        ret.validate()
        return ret

    def merge(self, other):
        self.feature += other.feature
        self.offsets += other.offsets
        self.hash = hash((self.hash, other.hash))
        self.set_pad_value()


@dataclasses.dataclass
class MultimodalInputs:
    """The multimodal data related inputs."""

    # items of data
    mm_items: List[MultimodalDataItem]
    image_pad_len: Optional[list] = None
    num_image_tokens: Optional[int] = None

    # image
    im_token_id: Optional[int] = None
    im_start_id: Optional[int] = None
    im_end_id: Optional[int] = None
    slice_start_id: Optional[int] = None
    slice_end_id: Optional[int] = None

    # video
    video_token_id: Optional[int] = None

    # audio
    audio_token_id: Optional[int] = None
    audio_start_id: Optional[int] = None
    audio_end_id: Optional[int] = None

    # QWen2-VL related
    mrope_positions: Optional[torch.Tensor] = None
    mrope_position_delta: Optional[torch.Tensor] = None

    @staticmethod
    def from_dict(obj: dict):
        ret = MultimodalInputs(
            mm_items=obj["mm_items"],
        )

        assert isinstance(ret.mm_items, list)
        ret.mm_items = [item for item in ret.mm_items if item.is_valid()]
        for item in ret.mm_items:
            item.set_pad_value()

        optional_args = [
            "mrope_positions",
            "mrope_position_delta",
            "im_token_id",
            "im_start_id",
            "im_end_id",
            "video_token_id",
            "slice_start_id",
            "slice_end_id",
            "audio_start_id",
            "audio_end_id",
            "audio_token_id",
        ]
        for arg in optional_args:
            if arg in obj:
                setattr(ret, arg, obj[arg])

        return ret

    def contains_image_inputs(self) -> bool:
        return any(item.is_image() for item in self.mm_items)

    def contains_video_inputs(self) -> bool:
        return any(item.is_video() for item in self.mm_items)

    def contains_audio_inputs(self) -> bool:
        return any(item.is_audio() for item in self.mm_items)

    def contains_mm_input(self) -> bool:
        return any(True for item in self.mm_items if item.is_valid())

    def merge(self, other: MultimodalInputs):
        """
        Merge multimodal inputs when requests are merged.
        """

        # args needed to be merged
        optional_args = [
            "mm_items",
            "image_pad_len",
        ]
        for arg in optional_args:
            self_arg = getattr(self, arg, None)
            if self_arg is not None:
                setattr(self, arg, self_arg + getattr(other, arg))

        mrope_positions = self.mrope_positions
        if mrope_positions is not None:
            if other.mrope_positions is None:
                self.mrope_positions = mrope_positions
            else:
                self.mrope_positions = torch.cat(
                    [self.mrope_positions, other.mrope_positions], dim=1
                )

        mrope_position_delta = self.mrope_position_delta
        if mrope_position_delta is not None:
            if other.mrope_position_delta is None:
                self.mrope_position_delta = mrope_position_delta
            else:
                self.mrope_position_delta = torch.cat(
                    [self.mrope_position_delta, other.mrope_position_delta], dim=0
                )

        for key, val in other.__dict__.items():
            if "_id" in key:
                # set token_ids
                if getattr(self, key, None) is None:
                    setattr(self, key, getattr(other, key, None))
        # other args would be kept intact


class Req:
    """The input and output status of a request."""

    def __init__(
        self,
        rid: str,
        origin_input_text: str,
        origin_input_ids: List[int],
        sampling_params: SamplingParams,
        return_logprob: bool = False,
        top_logprobs_num: int = 0,
        token_ids_logprob: List[int] = None,
        stream: bool = False,
        origin_input_ids_unpadded: Optional[Tuple[int]] = None,
        lora_id: Optional[str] = None,
        input_embeds: Optional[List[List[float]]] = None,
        token_type_ids: List[int] = None,
        session_id: Optional[str] = None,
        custom_logit_processor: Optional[str] = None,
        return_hidden_states: bool = False,
        eos_token_ids: Optional[Set[int]] = None,
        bootstrap_host: Optional[str] = None,
        bootstrap_port: Optional[int] = None,
        bootstrap_room: Optional[int] = None,
        data_parallel_rank: Optional[int] = None,
        vocab_size: Optional[int] = None,
    ):
        # Input and output info
        self.rid = rid
        self.origin_input_text = origin_input_text
        self.origin_input_ids_unpadded = (
            origin_input_ids_unpadded
            if origin_input_ids_unpadded
            else origin_input_ids  # Before image padding
        )
        self.origin_input_ids = origin_input_ids
        # Each decode stage's output ids
        self.output_ids = []
        # fill_ids = origin_input_ids + output_ids. Updated if chunked.
        self.fill_ids = []
        self.session_id = session_id
        self.input_embeds = input_embeds

        # For cross-encoder models
        self.token_type_ids = token_type_ids

        # The length of KV cache that has been removed in local attention chunked prefill
        self.evicted_seqlen_local = 0

        # Sampling info
        if isinstance(sampling_params.custom_params, dict):
            sampling_params = copy.copy(sampling_params)
            sampling_params.custom_params = sampling_params.custom_params | {
                "__req__": self
            }
        self.sampling_params = sampling_params
        self.custom_logit_processor = custom_logit_processor
        self.return_hidden_states = return_hidden_states
        self.lora_id = lora_id

        # Memory pool info
        self.req_pool_idx: Optional[int] = None

        # Check finish
        self.tokenizer = None
        self.finished_reason = None
        # Whether this request has finished output
        self.finished_output = None
        # If we want to abort the request in the middle of the event loop, set this to true
        # Note: We should never set finished_reason in the middle, the req will get filtered and never respond
        self.to_abort = False
        # This carries the error message for `.to_abort` and will be attached to the finished_reason at the end of the event loop
        self.to_abort_message: str = None
        self.stream = stream
        self.eos_token_ids = eos_token_ids
        self.vocab_size = vocab_size

        # For incremental decoding
        # ----- | --------- read_ids -------|
        # ----- |   surr_ids  |
        # xxxxx | xxxxxxxxxxx | xxxxxxxxxxx |
        # ----- ^ ----------- ^ ----------- ^
        # ----- 1 ----------- 2 ----------- 3
        # 1: surr_offset
        # 2: read_offset
        # 3: last token
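        # Worked example (illustration only): with 10 unpadded prompt tokens and
        # INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5, the first call to
        # init_incremental_detokenize() sets read_offset = 10 and surr_offset = 5.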
        self.surr_offset = None  # Surrounding offset to defeat the cleanup algorithm
        self.read_offset = None
        self.decoded_text = ""

        # For multimodal inputs
        self.multimodal_inputs: Optional[MultimodalInputs] = None

        # Prefix info
        # The indices to kv cache for the shared prefix.
        self.prefix_indices: torch.Tensor = []
        # Number of tokens to run prefill.
        self.extend_input_len = 0
        # The relative logprob_start_len in an extend batch
        self.extend_logprob_start_len = 0
        self.last_node: Any = None
        self.last_host_node: Any = None
        self.host_hit_length = 0
        # The node to lock until for swa radix tree lock ref
        self.swa_uuid_for_lock: Optional[int] = None

        # Whether the request is chunked. It increments whenever
        # the request is chunked, and decrements whenever the chunked
        # request is processed.
        self.is_chunked = 0

        # For retraction
        self.is_retracted = False

        # Incremental streaming
        self.send_token_offset: int = 0
        self.send_decode_id_offset: int = 0
        # TODO (Byron): send_output_token_logprobs_offset and send_decode_id_offset can be different in disaggregation mode
        # because the decode server does not have the first output token logprobs
        self.send_output_token_logprobs_offset: int = 0

        # Logprobs (arguments)
        self.return_logprob = return_logprob
        # Start index to compute logprob from.
        self.logprob_start_len = 0
        self.top_logprobs_num = top_logprobs_num
        self.token_ids_logprob = token_ids_logprob
        self.temp_scaled_logprobs = False
        self.top_p_normalized_logprobs = False

        # Logprobs (return values)
        # True means the input logprob has been already sent to detokenizer.
        self.input_logprob_sent: bool = False
        self.input_token_logprobs_val: Optional[List[float]] = None
        self.input_token_logprobs_idx: Optional[List[int]] = None
        self.input_top_logprobs_val: Optional[List[float]] = None
        self.input_top_logprobs_idx: Optional[List[int]] = None
        self.input_token_ids_logprobs_val: Optional[List[float]] = None
        self.input_token_ids_logprobs_idx: Optional[List[int]] = None
        # Temporary holder to store input_token_logprobs.
        self.input_token_logprobs: Optional[List[Tuple[int]]] = None
        self.temp_input_top_logprobs_val: Optional[List[torch.Tensor]] = None
        self.temp_input_top_logprobs_idx: Optional[List[int]] = None
        self.temp_input_token_ids_logprobs_val: Optional[List[float]] = None
        self.temp_input_token_ids_logprobs_idx: Optional[List[int]] = None

        if return_logprob:
            # shape: (bs, 1)
            self.output_token_logprobs_val = []
            self.output_token_logprobs_idx = []
            # shape: (bs, k)
            self.output_top_logprobs_val = []
            self.output_top_logprobs_idx = []
            self.output_token_ids_logprobs_val = []
            self.output_token_ids_logprobs_idx = []
        else:
            self.output_token_logprobs_val = self.output_token_logprobs_idx = (
                self.output_top_logprobs_val
            ) = self.output_top_logprobs_idx = self.output_token_ids_logprobs_val = (
                self.output_token_ids_logprobs_idx
            ) = None
        self.hidden_states: List[List[float]] = []
        self.hidden_states_tensor = None  # Note: use tensor instead of list to transfer hidden_states when PD + MTP

        # Embedding (return values)
        self.embedding = None

        # Constrained decoding
        self.grammar: Optional[BaseGrammarObject] = None
        self.grammar_wait_ct = 0

        # The number of cached tokens that were already cached in the KV cache
        self.cached_tokens = 0
        self.already_computed = 0

        # The number of verification forward passes in the speculative decoding.
        # This is used to compute the average acceptance length per request.
        self.spec_verify_ct = 0

        # For metrics
        self.time_stats: TimeStats = TimeStats()
        self.has_log_time_stats: bool = False
        self.queue_time_start = None
        self.queue_time_end = None

        # For disaggregation
        self.bootstrap_host: str = bootstrap_host
        self.bootstrap_port: Optional[int] = bootstrap_port
        self.bootstrap_room: Optional[int] = bootstrap_room
        self.disagg_kv_sender: Optional[BaseKVSender] = None

        # For data parallel rank routing
        self.data_parallel_rank: Optional[int] = data_parallel_rank

        # the start index of the sent kv cache
        # We want to send it chunk by chunk for chunked prefill.
        # After every chunk forward, we do the following:
        # kv_send(req.input_ids[req.start_send_idx:len(req.fill_ids)])
        # start_send_idx = len(req.fill_ids)
        self.start_send_idx: int = 0

        # For overlap schedule, we delay the kv transfer until `process_batch_result_disagg_prefill` rather than `process_prefill_chunk` in non-overlap
        # This is because kv is not ready in `process_prefill_chunk`.
        # We use `tmp_end_idx` to store the end index of the kv cache to send.
        self.tmp_end_idx: int = -1
        self.metadata_buffer_index: int = -1

    @property
    def seqlen(self):
        return len(self.origin_input_ids) + len(self.output_ids)

    def extend_image_inputs(self, image_inputs):
        if self.multimodal_inputs is None:
            self.multimodal_inputs = image_inputs
        else:
            self.multimodal_inputs.merge(image_inputs)

    def finished(self) -> bool:
        # Whether request reached finished condition
        return self.finished_reason is not None

    def init_next_round_input(
        self,
        tree_cache: Optional[BasePrefixCache] = None,
    ):
        self.fill_ids = self.origin_input_ids + self.output_ids
        if tree_cache is not None:
            if isinstance(tree_cache, LoRARadixCache):
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix_with_lora_id(
                    key=LoRAKey(
                        lora_id=self.lora_id, token_ids=self.adjust_max_prefix_ids()
                    ),
                )
            else:
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix(
                    key=self.adjust_max_prefix_ids(),
                )
        self.extend_input_len = len(self.fill_ids) - len(self.prefix_indices)

    def adjust_max_prefix_ids(self):
        self.fill_ids = self.origin_input_ids + self.output_ids
        input_len = len(self.fill_ids)

        # FIXME: To work around some bugs in logprob computation, we need to ensure each
        # request has at least one token. Later, we can relax this requirement and use `input_len`.
        max_prefix_len = input_len - 1

        if self.sampling_params.max_new_tokens > 0:
            # Need at least one token to compute logits
            max_prefix_len = min(max_prefix_len, input_len - 1)

        if self.return_logprob:
            max_prefix_len = min(max_prefix_len, self.logprob_start_len)

        max_prefix_len = max(max_prefix_len, 0)
        return self.fill_ids[:max_prefix_len]

    # Based on https://github.com/vllm-project/vllm/blob/7a64d24aad69e4d2548aa0bf528d9fe63428ab01/vllm/transformers_utils/detokenizer.py#L194-L313
    def init_incremental_detokenize(self):
        first_iter = self.surr_offset is None or self.read_offset is None

        if first_iter:
            self.read_offset = len(self.origin_input_ids_unpadded)
            self.surr_offset = max(
                self.read_offset - INIT_INCREMENTAL_DETOKENIZATION_OFFSET, 0
            )

        all_ids = self.origin_input_ids_unpadded + self.output_ids
        return all_ids[self.surr_offset :], self.read_offset - self.surr_offset

    def check_finished(self):
        if self.finished():
            return

        if self.to_abort:
            self.finished_reason = FINISH_ABORT(
                message=self.to_abort_message,
            )
            return

        if len(self.output_ids) >= self.sampling_params.max_new_tokens:
            self.finished_reason = FINISH_LENGTH(
                length=self.sampling_params.max_new_tokens
            )
            return

        if self.grammar is not None:
            if self.grammar.is_terminated():
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=self.output_ids[-1])
                return

        last_token_id = self.output_ids[-1]

        if not self.sampling_params.ignore_eos:
            matched_eos = False

            # Check stop token ids
            if self.sampling_params.stop_token_ids:
                matched_eos = last_token_id in self.sampling_params.stop_token_ids
            if self.eos_token_ids:
                matched_eos |= last_token_id in self.eos_token_ids
            if self.tokenizer is not None:
                matched_eos |= last_token_id == self.tokenizer.eos_token_id
                if self.tokenizer.additional_stop_token_ids:
                    matched_eos |= (
                        last_token_id in self.tokenizer.additional_stop_token_ids
                    )
            if matched_eos:
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=last_token_id)
                return

        if last_token_id > self.vocab_size or last_token_id < 0:
            if self.sampling_params.stop_token_ids:
                self.output_ids[-1] = next(iter(self.sampling_params.stop_token_ids))
            if self.eos_token_ids:
                self.output_ids[-1] = next(iter(self.eos_token_ids))
            self.finished_reason = FINISH_MATCHED_STR(matched="NaN happened")
            return

        # Check stop strings
        if len(self.sampling_params.stop_strs) > 0:
            tail_str = self.tokenizer.decode(
                self.output_ids[-(self.sampling_params.stop_str_max_len + 1) :]
            )

            for stop_str in self.sampling_params.stop_strs:
                if stop_str in tail_str or stop_str in self.decoded_text:
                    self.finished_reason = FINISH_MATCHED_STR(matched=stop_str)
                    return

    def reset_for_retract(self):
        self.prefix_indices = []
        self.last_node = None
        self.swa_uuid_for_lock = None
        self.extend_input_len = 0
        self.is_retracted = True
        self.input_token_logprobs = None
        self.temp_input_top_logprobs_val = None
        self.temp_input_top_logprobs_idx = None
        self.extend_logprob_start_len = 0
        self.is_chunked = 0
        self.req_pool_idx = None
        self.already_computed = 0

    def offload_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        self.kv_cache_cpu = token_to_kv_pool_allocator.get_cpu_copy(token_indices)

    def load_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        token_to_kv_pool_allocator.load_cpu_copy(self.kv_cache_cpu, token_indices)
        del self.kv_cache_cpu

    def log_time_stats(self):
        # If overlap schedule, we schedule one decode batch ahead so this gets called twice.
        if self.has_log_time_stats is True:
            return

        if self.bootstrap_room is not None:
            prefix = f"Req Time Stats(rid={self.rid}, bootstrap_room={self.bootstrap_room}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        else:
            prefix = f"Req Time Stats(rid={self.rid}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        logger.info(f"{prefix}: {self.time_stats}")
        self.has_log_time_stats = True

    def set_finish_with_abort(self, error_msg: str):
        if get_tensor_model_parallel_rank() == 0:
            logger.error(f"{error_msg}, {self.rid=}")
        self.multimodal_inputs = None
        self.grammar = None
        self.origin_input_ids = [0]  # set it to one token to skip the long prefill
        self.return_logprob = False
        self.finished_reason = FINISH_ABORT(
            error_msg, HTTPStatus.BAD_REQUEST, "BadRequestError"
        )

    def __repr__(self):
        return (
            f"Req(rid={self.rid}, "
            f"input_ids={self.origin_input_ids}, output_ids={self.output_ids}, "
            f"{self.grammar=}, "
            f"{self.sampling_params=})"
        )


# Batch id
bid = 0


@dataclasses.dataclass
class ScheduleBatch(ScheduleBatchDisaggregationDecodeMixin):
    """Store all information of a batch on the scheduler."""

    # Request, memory pool, and cache
    reqs: List[Req]
    req_to_token_pool: ReqToTokenPool = None
    token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator = None
    tree_cache: BasePrefixCache = None
    is_hybrid: bool = False

    # Batch configs
    model_config: ModelConfig = None
    forward_mode: ForwardMode = None
    enable_overlap: bool = False
    # Tell whether the current running batch is full so that we can skip
    # the check of whether to prefill new requests.
    # This is an optimization to reduce the overhead of the prefill check.
    batch_is_full: bool = False

    # Events
    launch_done: Optional[threading.Event] = None

    # For chunked prefill in PP
    chunked_req: Optional[Req] = None

    # Sampling info
    sampling_info: SamplingBatchInfo = None
    next_batch_sampling_info: SamplingBatchInfo = None

    # Batched arguments to model runner
    input_ids: torch.Tensor = None  # shape: [b], int64
    input_embeds: torch.Tensor = None  # shape: [b, hidden_size], float32
    token_type_ids: torch.Tensor = None  # shape: [b], int64
    req_pool_indices: torch.Tensor = None  # shape: [b], int64
    seq_lens: torch.Tensor = None  # shape: [b], int64
    # The output locations of the KV cache
    out_cache_loc: torch.Tensor = None  # shape: [b], int64
    output_ids: torch.Tensor = None  # shape: [b], int64

    # For multimodal inputs
    multimodal_inputs: Optional[List] = None

    # The sum of all sequence lengths
    seq_lens_sum: int = None
    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: torch.Tensor = None  # shape: [b], int32

    # For DP attention
    global_num_tokens: Optional[List[int]] = None
    global_num_tokens_for_logprob: Optional[List[int]] = None
    is_extend_in_batch: bool = False
    can_run_dp_cuda_graph: bool = False
    tbo_split_seq_index: Optional[int] = None
    global_forward_mode: Optional[ForwardMode] = None

    # For processing logprobs
    return_logprob: bool = False
    top_logprobs_nums: Optional[List[int]] = None
    token_ids_logprobs: Optional[List[List[int]]] = None

    # For logits and logprob post processing
    temp_scaled_logprobs: bool = False
    top_p_normalized_logprobs: bool = False

    # For extend and mixed chunked prefill
    prefix_lens: List[int] = None
    extend_lens: List[int] = None
    extend_num_tokens: Optional[int] = None
    decoding_reqs: List[Req] = None
    extend_logprob_start_lens: List[int] = None
    # It is an empty list if logprob is not required.
    extend_input_logprob_token_ids: Optional[torch.Tensor] = None

    # For encoder-decoder architectures
    encoder_cached: Optional[List[bool]] = None
    encoder_lens: Optional[torch.Tensor] = None
    encoder_lens_cpu: Optional[List[int]] = None
    encoder_out_cache_loc: Optional[torch.Tensor] = None

    # Stream
    has_stream: bool = False

    # Has grammar
    has_grammar: bool = False

    # Device
    device: str = "cuda"

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]] = None

    # Enable custom logit processor
    enable_custom_logit_processor: bool = False

    # Whether to return hidden states
    return_hidden_states: bool = False

    # hicache pointer for synchronizing data loading from CPU to GPU
    hicache_consumer_index: int = 0

    @classmethod
    def init_new(
        cls,
        reqs: List[Req],
        req_to_token_pool: ReqToTokenPool,
        token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator,
        tree_cache: BasePrefixCache,
        model_config: ModelConfig,
        enable_overlap: bool,
        spec_algorithm: SpeculativeAlgorithm,
        enable_custom_logit_processor: bool,
        chunked_req: Optional[Req] = None,
    ):
        return_logprob = any(req.return_logprob for req in reqs)

        is_hybrid = False
        if isinstance(token_to_kv_pool_allocator, SWATokenToKVPoolAllocator):
            assert (
                tree_cache is None
                or isinstance(tree_cache, SWARadixCache)
                or isinstance(tree_cache, SWAChunkCache)
            ), "SWARadixCache or SWAChunkCache is required for SWATokenToKVPoolAllocator"
            is_hybrid = True

        return cls(
            reqs=reqs,
            req_to_token_pool=req_to_token_pool,
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
            tree_cache=tree_cache,
            is_hybrid=is_hybrid,
            model_config=model_config,
            enable_overlap=enable_overlap,
            return_logprob=return_logprob,
            has_stream=any(req.stream for req in reqs),
            has_grammar=any(req.grammar for req in reqs),
            device=req_to_token_pool.device,
            spec_algorithm=spec_algorithm,
            enable_custom_logit_processor=enable_custom_logit_processor,
            return_hidden_states=any(req.return_hidden_states for req in reqs),
            chunked_req=chunked_req,
        )

    def batch_size(self):
        return len(self.reqs)

    def is_empty(self):
        return len(self.reqs) == 0

    def alloc_req_slots(self, num_reqs: int):
        req_pool_indices = self.req_to_token_pool.alloc(num_reqs)
        if req_pool_indices is None:
            raise RuntimeError(
                "alloc_req_slots runs out of memory. "
                "Please set a smaller number for `--max-running-requests`. "
                f"{self.req_to_token_pool.available_size()=}, "
                f"{num_reqs=}, "
            )
        return req_pool_indices

    def alloc_token_slots(self, num_tokens: int, backup_state: bool = False):
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc(num_tokens)
        if out_cache_loc is None:
            phase_str = "Prefill" if self.forward_mode.is_extend() else "Decode"
            error_msg = (
                f"{phase_str} out of memory. Try to lower your batch size.\n"
                f"Try to allocate {num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            if self.tree_cache is not None:
                self.tree_cache.pretty_print()
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_extend(
        self,
        prefix_lens: torch.Tensor,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        extend_num_tokens: int,
        backup_state: bool = False,
    ):
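        # Eviction budget (assumed rationale): the new extend tokens plus one extra
        # page per sequence, which should cover any page-alignment overhead.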
        num_tokens = (
            extend_num_tokens
            + len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        )
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_extend(
            prefix_lens, seq_lens, last_loc, extend_num_tokens
        )
        if out_cache_loc is None:
            error_msg = (
                f"Prefill out of memory. Try to lower your batch size.\n"
                f"Try to allocate {extend_num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_decode(
        self,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        backup_state: bool = False,
    ):
        num_tokens = len(seq_lens) * self.token_to_kv_pool_allocator.page_size

        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_decode(seq_lens, last_loc)
        if out_cache_loc is None:
            error_msg = (
                f"Decode out of memory. Try to lower your batch size.\n"
                f"Try to allocate {len(seq_lens)} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def prepare_encoder_info_extend(self, input_ids: List[int], seq_lens: List[int]):
        self.encoder_lens_cpu = []
        self.encoder_cached = []

        for req in self.reqs:
            im = req.multimodal_inputs
            if im is None or im.num_image_tokens is None:
                # No image input
                self.encoder_lens_cpu.append(0)
                self.encoder_cached.append(True)
            else:
                self.encoder_lens_cpu.append(im.num_image_tokens)
                self.encoder_cached.append(
                    self.forward_mode.is_decode()
                    or len(req.prefix_indices) >= im.num_image_tokens
                )

        self.encoder_lens = torch.tensor(self.encoder_lens_cpu, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        # Strip encoder infos
        pt = 0
        decoder_out_cache_loc = []
        encoder_out_cache_loc = []
        for i, req in enumerate(self.reqs):
            encoder_len = self.encoder_lens_cpu[i]
            seq_lens[i] -= encoder_len

            if len(req.prefix_indices) < encoder_len:
                # NOTE: the encoder part should be considered as a whole
                assert len(req.prefix_indices) == 0
                input_ids[i] = input_ids[i][encoder_len:]
                encoder_out_cache_loc.append(self.out_cache_loc[pt : pt + encoder_len])
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt + encoder_len : pt + req.extend_input_len]
                )
                self.extend_lens[i] -= encoder_len
                self.extend_num_tokens -= encoder_len
            else:
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt : pt + req.extend_input_len]
                )
                self.prefix_lens[i] -= encoder_len

            pt += req.extend_input_len

        # Reassign
        self.input_ids = torch.tensor(sum(input_ids, []), dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        self.seq_lens = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if not decoder_out_cache_loc:
            self.out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.out_cache_loc = torch.cat(decoder_out_cache_loc)

        if not encoder_out_cache_loc:
            self.encoder_out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.encoder_out_cache_loc = torch.cat(encoder_out_cache_loc)

        assert (
            len(self.out_cache_loc) == self.extend_num_tokens
        ), f"Expected {len(self.out_cache_loc)}, got {self.extend_num_tokens}"

    def prepare_for_extend(self):
        self.forward_mode = ForwardMode.EXTEND

        # Allocate req slots
        bs = len(self.reqs)
        req_pool_indices = self.alloc_req_slots(bs)

        # Init tensors
        reqs = self.reqs
        input_ids = [r.fill_ids[len(r.prefix_indices) :] for r in reqs]
        extend_num_tokens = sum(len(ids) for ids in input_ids)
        seq_lens = [len(r.fill_ids) for r in reqs]
        orig_seq_lens = [max(len(r.fill_ids), len(r.origin_input_ids)) for r in reqs]
        prefix_lens = [len(r.prefix_indices) for r in reqs]
        extend_lens = [r.extend_input_len for r in reqs]

        token_type_ids = [
            r.token_type_ids for r in reqs if r.token_type_ids is not None
        ]

        req_pool_indices_tensor = torch.tensor(req_pool_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        input_ids_tensor = torch.tensor(
            list(chain.from_iterable(input_ids)), dtype=torch.int64
        ).to(self.device, non_blocking=True)
        seq_lens_tensor = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        orig_seq_lens_tensor = torch.tensor(orig_seq_lens, dtype=torch.int32).to(
            self.device, non_blocking=True
        )
        prefix_lens_tensor = torch.tensor(
            prefix_lens, dtype=torch.int64, device=self.device
        )

        token_type_ids_tensor = None
        if len(token_type_ids) > 0:
            token_type_ids_tensor = torch.tensor(
                sum(token_type_ids, []), dtype=torch.int64
            ).to(self.device, non_blocking=True)

        extend_lens_tensor = seq_lens_tensor - prefix_lens_tensor

        # Copy prefix and do some basic check
        input_embeds = []
        extend_input_logprob_token_ids = []
        multimodal_inputs = []

        for i, (req, seq_len, pre_len) in enumerate(zip(reqs, seq_lens, prefix_lens)):
            req.req_pool_idx = req_pool_indices[i]
            assert seq_len - pre_len == req.extend_input_len

            if pre_len > 0:
                self.req_to_token_pool.write(
                    (req.req_pool_idx, slice(0, pre_len)), req.prefix_indices
                )
                if isinstance(self.tree_cache, SWAChunkCache):
                    self.tree_cache.evict_swa(
                        req, pre_len, self.model_config.attention_chunk_size
                    )

            # If input_embeds are available, store them
            if req.input_embeds is not None:
                # If req.input_embeds is already a list, append its content directly
                input_embeds.extend(req.input_embeds)  # Use extend to avoid nesting

            multimodal_inputs.append(req.multimodal_inputs)

            req.cached_tokens += pre_len - req.already_computed
            req.already_computed = seq_len
1210
            req.is_retracted = False

            # Compute the relative logprob_start_len in an extend batch
            if req.logprob_start_len >= pre_len:
                req.extend_logprob_start_len = min(
                    req.logprob_start_len - pre_len,
                    req.extend_input_len,
                    req.seqlen - 1,
                )
            else:
                req.extend_logprob_start_len = 0
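            # Illustrative example (hypothetical numbers): with pre_len=3,
            # extend_input_len=4, seqlen=7 and logprob_start_len=5, the branch above
            # gives extend_logprob_start_len = min(5 - 3, 4, 7 - 1) = 2, i.e. the
            # first two tokens of this extend chunk are skipped for input logprobs.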

            if self.return_logprob:
                # Find input logprob token ids.
                # First, find the global index range within origin_input_ids and shift
                # it by 1, because the next token is needed to compute each input
                # logprob. E.g., (chunk size 2)
                #
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [1, 2]
                # extend_input_logprob_token_id = [2, 3]
                #
                # Note that it can also overflow. In this case, we pad it with 0.
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [3, 4]
                # extend_input_logprob_token_id = [4, 0]
                global_start_idx, global_end_idx = (
                    len(req.prefix_indices),
                    len(req.fill_ids),
                )
                # Apply logprob_start_len
                if global_start_idx < req.logprob_start_len:
                    global_start_idx = req.logprob_start_len

                logprob_token_ids = req.origin_input_ids[
                    global_start_idx + 1 : global_end_idx + 1
                ]
                extend_input_logprob_token_ids.extend(logprob_token_ids)

                # We will need req.extend_input_len - req.extend_logprob_start_len number of
                # tokens, and logprob_token_ids is for input logprob, so pad the rest of them by 0.
                extend_input_logprob_token_ids.extend(
                    [0]
                    * (
                        req.extend_input_len
                        - req.extend_logprob_start_len
                        - len(logprob_token_ids)
                    )
                )

        if self.return_logprob:
            extend_input_logprob_token_ids = torch.tensor(
                extend_input_logprob_token_ids
            )
        else:
            extend_input_logprob_token_ids = None

        # Allocate memory
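        # With page_size == 1 every token can take any free slot. Otherwise the paged
        # allocator also needs the location of each request's last prefix token
        # (last_loc) so the extension keeps filling that partially used page before
        # opening new pages. Illustrative example (hypothetical numbers): with
        # page_size=4, prefix_len=6 and seq_len=9, the 3 new tokens use the 2 free
        # slots of the prefix's second page plus 1 slot of a freshly allocated page.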
        if self.token_to_kv_pool_allocator.page_size == 1:
            out_cache_loc = self.alloc_token_slots(extend_num_tokens)
        else:
            last_loc = get_last_loc(
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
            )
            out_cache_loc = self.alloc_paged_token_slots_extend(
                prefix_lens_tensor, seq_lens_tensor, last_loc, extend_num_tokens
            )

        # Set fields
        self.input_ids = input_ids_tensor
        self.req_pool_indices = req_pool_indices_tensor
        self.seq_lens = seq_lens_tensor
        self.orig_seq_lens = orig_seq_lens_tensor
        self.out_cache_loc = out_cache_loc
        self.input_embeds = (
            torch.tensor(input_embeds).to(self.device, non_blocking=True)
            if input_embeds
            else None
        )
        for mm_input in multimodal_inputs:
            if mm_input is None:
                continue
            for mm_item in mm_input.mm_items:
                pixel_values = getattr(mm_item, "feature", None)
                if isinstance(pixel_values, torch.Tensor):
                    mm_item.feature = pixel_values.to(self.device, non_blocking=True)
        self.multimodal_inputs = multimodal_inputs
        self.token_type_ids = token_type_ids_tensor
        self.seq_lens_sum = sum(seq_lens)

        if self.return_logprob:
            self.top_logprobs_nums = [r.top_logprobs_num for r in reqs]
            self.token_ids_logprobs = [r.token_ids_logprob for r in reqs]

        self.extend_logprob_start_lens = [r.extend_logprob_start_len for r in reqs]
        self.extend_num_tokens = extend_num_tokens
        self.prefix_lens = prefix_lens
        self.extend_lens = extend_lens
        self.extend_input_logprob_token_ids = extend_input_logprob_token_ids

        # Write to req_to_token_pool
        if support_triton(global_server_args_dict.get("attention_backend")):
            # TODO: some tensors can be reused for ForwardBatchInfo (e.g., extend_lens, cumsum_start)

            write_req_to_token_pool_triton[(bs,)](
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
                seq_lens_tensor,
                extend_lens_tensor,
                out_cache_loc,
                self.req_to_token_pool.req_to_token.shape[1],
            )
        else:
            pt = 0
            for i in range(bs):
                self.req_to_token_pool.write(
                    (req_pool_indices[i], slice(prefix_lens[i], seq_lens[i])),
                    out_cache_loc[pt : pt + extend_lens[i]],
                )
                pt += extend_lens[i]

        if self.model_config.is_encoder_decoder:
            self.prepare_encoder_info_extend(input_ids, seq_lens)

        # Build sampling info
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_split_prefill(self):
        self.prepare_for_extend()
        # For split prefill, we need to set the forward mode to SPLIT_PREFILL
        self.forward_mode = ForwardMode.SPLIT_PREFILL

    def mix_with_running(self, running_batch: "ScheduleBatch"):
        self.forward_mode = ForwardMode.MIXED
        running_bs = running_batch.batch_size()

        for req in running_batch.reqs:
            req.fill_ids = req.origin_input_ids + req.output_ids
            req.extend_input_len = 1

        input_ids = torch.cat([self.input_ids, running_batch.input_ids])
        out_cache_loc = torch.cat([self.out_cache_loc, running_batch.out_cache_loc])

        self.merge_batch(running_batch)
        self.input_ids = input_ids
        self.out_cache_loc = out_cache_loc

        # For the overlap scheduler, output_ids has a one-step delay
        delta = 0 if self.enable_overlap else -1
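        # Without overlap, output_ids already contains the token being extended in
        # this mixed step, but its KV entry is not written yet, so the cached prefix
        # is one token shorter (hence -1). With overlap, output_ids lags one step and
        # does not include that token yet.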

        # NOTE: prefix_indices is what has been cached, but we don't cache each decode step
        self.prefix_lens.extend(
            [
                len(r.origin_input_ids) + len(r.output_ids) + delta
                for r in running_batch.reqs
            ]
        )
        self.extend_lens.extend([1] * running_bs)
        self.extend_num_tokens += running_bs
        # TODO (lianmin): Revisit this. It should be seq_len - 1
        self.extend_logprob_start_lens.extend([0] * running_bs)

    def new_page_count_next_decode(self):
        page_size = self.token_to_kv_pool_allocator.page_size
        if page_size == 1:
            return len(self.reqs)
        # In the decoding phase, the length of a request's KV cache should be
        # the total length of the request minus 1
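        # So a request needs a new page for its next decode token only when its
        # current KV length is a multiple of the page size, e.g. (hypothetical
        # numbers) with page_size=4 and seqlen=9, the KV length 9 - 1 = 8 sits
        # exactly on a page boundary and one new page is required.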
        return (
            sum(1 for req in self.reqs if req.seqlen % page_size == 0)
            if self.enable_overlap
            else sum(1 for req in self.reqs if (req.seqlen - 1) % page_size == 0)
        )

    def check_decode_mem(self, buf_multiplier=1):
        num_tokens = (
            self.new_page_count_next_decode()
            * buf_multiplier
            * self.token_to_kv_pool_allocator.page_size
        )

        self._evict_tree_cache_if_needed(num_tokens)
        return self._is_available_size_sufficient(num_tokens)

    def retract_decode(self, server_args: ServerArgs):
        """Retract the decoding requests when there is not enough memory."""
        sorted_indices = list(range(len(self.reqs)))

        # TODO(lsyin): improve retraction policy for radix cache
        # For spec decoding, filter_batch API can only filter
        # requests from the back, so we can only retract from the back.
        # TODO(sang): Clean up finish path and support better retract
        # policy.
        if not server_args.speculative_algorithm:
            sorted_indices.sort(
                key=lambda i: (
                    len(self.reqs[i].output_ids),
                    -len(self.reqs[i].origin_input_ids),
                ),
                reverse=True,
            )
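            # With this sort key, pop() from the back retracts the request that has
            # decoded the fewest tokens so far (least work lost) and, among ties, has
            # the longest prompt (most memory freed).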

        def get_required_tokens(num_reqs: int):
            headroom_for_spec_decode = 0
            if server_args.speculative_algorithm:
                headroom_for_spec_decode += (
                    num_reqs
                    * server_args.speculative_eagle_topk
                    * server_args.speculative_num_steps
                    + num_reqs * server_args.speculative_num_draft_tokens
                )
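                # Illustrative example (hypothetical numbers): 2 remaining requests,
                # topk=4, num_steps=3, num_draft_tokens=8 -> headroom
                # = 2*4*3 + 2*8 = 40 extra tokens.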
            return (
                num_reqs * global_config.retract_decode_steps + headroom_for_spec_decode
            )

        def _get_available_size():
            if self.is_hybrid:
                return min(
                    self.token_to_kv_pool_allocator.full_available_size(),
                    self.token_to_kv_pool_allocator.swa_available_size(),
                )
            else:
                return self.token_to_kv_pool_allocator.available_size()

        retracted_reqs = []
        seq_lens_cpu = self.seq_lens.cpu().numpy()
        first_iter = True
        while (
            _get_available_size() < get_required_tokens(len(sorted_indices))
            or first_iter
        ):
            if len(sorted_indices) == 1:
                # Corner case: only one request left
                if self.is_hybrid:
                    full_available_size = (
                        self.token_to_kv_pool_allocator.full_available_size()
                    )
                    swa_available_size = (
                        self.token_to_kv_pool_allocator.swa_available_size()
                    )
                    assert (
                        full_available_size > 0 and swa_available_size > 0
                    ), f"No space left for only one request in SWA mode {full_available_size=}, {swa_available_size=}"
                else:
                    assert (
                        self.token_to_kv_pool_allocator.available_size() > 0
                    ), f"No space left for only one request, {self.token_to_kv_pool_allocator.available_size()=}"
                break

            first_iter = False
            idx = sorted_indices.pop()
            req = self.reqs[idx]
            retracted_reqs.append(req)

            if server_args.disaggregation_mode == "decode":
                req.offload_kv_cache(
                    self.req_to_token_pool, self.token_to_kv_pool_allocator
                )

            if isinstance(self.tree_cache, ChunkCache):
                # ChunkCache does not have eviction
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)
            else:
                # TODO: apply more fine-grained retraction
                last_uncached_pos = (
                    len(req.prefix_indices) // server_args.page_size
                ) * server_args.page_size
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, last_uncached_pos : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)

                # release the last node
                if self.is_hybrid:
                    self.tree_cache.dec_lock_ref(req.last_node, req.swa_uuid_for_lock)
                else:
                    self.tree_cache.dec_lock_ref(req.last_node)

                # NOTE(lsyin): we should use the newly evictable memory instantly.
                num_tokens = len(sorted_indices) * global_config.retract_decode_steps
                self._evict_tree_cache_if_needed(num_tokens)

            req.reset_for_retract()

            if len(retracted_reqs) == 0:
                # Corner case: only one request left
                raise ValueError(
                    "Failed to retract any request. No space left for only one request."
                )

        self.filter_batch(keep_indices=sorted_indices)

        # Reqs in batch are filtered
        total_decoded_tokens = sum(len(r.output_ids) for r in self.reqs)
        total_max_new_tokens = sum(r.sampling_params.max_new_tokens for r in self.reqs)

        new_estimate_ratio = (
            total_decoded_tokens + global_config.retract_decode_steps * len(self.reqs)
        ) / total_max_new_tokens
        new_estimate_ratio = min(1.0, new_estimate_ratio)

        return retracted_reqs, new_estimate_ratio

    def prepare_encoder_info_decode(self):
        # Reset the encoder cached status
        self.encoder_cached = [True] * len(self.reqs)

    def prepare_for_idle(self):
        self.forward_mode = ForwardMode.IDLE
        self.input_ids = torch.empty(0, dtype=torch.int64, device=self.device)
        self.seq_lens = torch.empty(0, dtype=torch.int64, device=self.device)
        self.orig_seq_lens = torch.empty(0, dtype=torch.int32, device=self.device)
        self.out_cache_loc = torch.empty(0, dtype=torch.int64, device=self.device)
        self.req_pool_indices = torch.empty(0, dtype=torch.int32, device=self.device)
        self.seq_lens_sum = 0
        self.extend_num_tokens = 0
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_decode(self):
        self.forward_mode = ForwardMode.DECODE
        bs = len(self.reqs)

        if self.spec_algorithm.is_eagle():
            # If spec decoding is used, the decode batch is prepared inside
            # `forward_batch_speculative_generation` after running the draft model.
            return

        if self.sampling_info.penalizer_orchestrator.is_required:
            if self.enable_overlap:
                # TODO: this can be slow, optimize this.
                delayed_output_ids = torch.tensor(
                    [
                        (
                            req.output_ids[-1]
                            if len(req.output_ids)
                            else req.origin_input_ids[-1]
                        )
                        for req in self.reqs
                    ],
                    dtype=torch.int64,
                    device=self.device,
                )
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    delayed_output_ids
                )
            else:
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    self.output_ids.to(torch.int64)
                )

        # Update fields
        self.input_ids = self.output_ids
        self.output_ids = None

        if self.model_config.is_encoder_decoder:
            locs = self.encoder_lens + self.seq_lens
            self.prepare_encoder_info_decode()
        else:
            locs = self.seq_lens.clone()

        if self.enable_overlap:
            # Do not use in-place operations in the overlap mode
            self.seq_lens = self.seq_lens + 1
            self.orig_seq_lens = self.orig_seq_lens + 1
        else:
            # A faster in-place version
            self.seq_lens.add_(1)
            self.orig_seq_lens.add_(1)
        self.seq_lens_sum += bs

        # free memory
        if isinstance(self.tree_cache, SWAChunkCache):
            for req in self.reqs:
                self.tree_cache.evict_swa(
                    req, req.seqlen - 1, self.model_config.attention_chunk_size
                )

        # Allocate memory
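        # seq_lens was already advanced by 1 above, so the last cached token of each
        # request lives at position seq_lens - 2; the paged allocator uses its
        # location to keep filling a partially used page before opening a new one.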
        if self.token_to_kv_pool_allocator.page_size == 1:
            self.out_cache_loc = self.alloc_token_slots(bs)
        else:
            last_loc = self.req_to_token_pool.req_to_token[
                self.req_pool_indices, self.seq_lens - 2
            ]
            self.out_cache_loc = self.alloc_paged_token_slots_decode(
                self.seq_lens, last_loc
            )

        self.req_to_token_pool.write(
            (self.req_pool_indices, locs), self.out_cache_loc.to(torch.int32)
        )

    def filter_batch(
        self,
        chunked_req_to_exclude: Optional[Union[Req, List[Req]]] = None,
        keep_indices: Optional[List[int]] = None,
    ):
        if keep_indices is None:
            if isinstance(chunked_req_to_exclude, Req):
                chunked_req_to_exclude = [chunked_req_to_exclude]
            elif chunked_req_to_exclude is None:
                chunked_req_to_exclude = []
            keep_indices = [
                i
                for i in range(len(self.reqs))
                if not self.reqs[i].finished()
                and self.reqs[i] not in chunked_req_to_exclude
            ]

        if keep_indices is None or len(keep_indices) == 0:
            # Filter out all requests
            self.reqs = []
            return

        if len(keep_indices) == len(self.reqs):
            # No need to filter
            return

        keep_indices_device = torch.tensor(keep_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if self.model_config.is_encoder_decoder:
            self.encoder_lens = self.encoder_lens[keep_indices_device]
            self.encoder_lens_cpu = [self.encoder_lens_cpu[i] for i in keep_indices]

        self.reqs = [self.reqs[i] for i in keep_indices]
        if self.multimodal_inputs is not None:
            self.multimodal_inputs = [self.multimodal_inputs[i] for i in keep_indices]
        self.req_pool_indices = self.req_pool_indices[keep_indices_device]
        self.seq_lens = self.seq_lens[keep_indices_device]
        self.orig_seq_lens = self.orig_seq_lens[keep_indices_device]
        self.out_cache_loc = None
        self.seq_lens_sum = self.seq_lens.sum().item()
        self.output_ids = self.output_ids[keep_indices_device]
        self.return_logprob = any(req.return_logprob for req in self.reqs)
        if self.return_logprob:
            self.top_logprobs_nums = [self.top_logprobs_nums[i] for i in keep_indices]
            self.token_ids_logprobs = [self.token_ids_logprobs[i] for i in keep_indices]
        else:
            self.top_logprobs_nums = None
            self.token_ids_logprobs = None

        self.has_stream = any(req.stream for req in self.reqs)
        self.has_grammar = any(req.grammar for req in self.reqs)

        self.sampling_info.filter_batch(keep_indices, keep_indices_device)
        if self.spec_info:
            self.spec_info.filter_batch(keep_indices_device)

    def merge_batch(self, other: "ScheduleBatch"):
        # The penalizer orchestrator must be merged before Batch.reqs is merged, because
        # orchestrator.merge() depends on Batch.reqs when preparing each penalizer, so it
        # has to see the pre-merge Batch.reqs.
        self.sampling_info.merge_batch(other.sampling_info)

        # Encoder-decoder infos
        if self.model_config.is_encoder_decoder:
            self.encoder_lens = torch.cat([self.encoder_lens, other.encoder_lens])
            self.encoder_lens_cpu.extend(other.encoder_lens_cpu)
        self.req_pool_indices = torch.cat(
            [self.req_pool_indices, other.req_pool_indices]
        )
        self.seq_lens = torch.cat([self.seq_lens, other.seq_lens])
        self.orig_seq_lens = torch.cat([self.orig_seq_lens, other.orig_seq_lens])
        self.out_cache_loc = None
        self.seq_lens_sum += other.seq_lens_sum
        if self.output_ids is not None:
            self.output_ids = torch.cat([self.output_ids, other.output_ids])
        if self.return_logprob and other.return_logprob:
            self.top_logprobs_nums.extend(other.top_logprobs_nums)
            self.token_ids_logprobs.extend(other.token_ids_logprobs)
        elif self.return_logprob:
            self.top_logprobs_nums.extend([0] * len(other.reqs))
            self.token_ids_logprobs.extend([None] * len(other.reqs))
        elif other.return_logprob:
            self.top_logprobs_nums = [0] * len(self.reqs) + other.top_logprobs_nums
            self.token_ids_logprobs = [None] * len(self.reqs) + other.token_ids_logprobs
        self.reqs.extend(other.reqs)
        if self.multimodal_inputs is not None:
            self.multimodal_inputs.extend(other.multimodal_inputs)

        self.return_logprob |= other.return_logprob
        self.has_stream |= other.has_stream
        self.has_grammar |= other.has_grammar
        self.return_hidden_states |= other.return_hidden_states

        if self.spec_info:
            self.spec_info.merge_batch(other.spec_info)

    def get_model_worker_batch(
        self, seq_lens_cpu_cache: Optional[torch.Tensor] = None
    ) -> ModelWorkerBatch:
        if self.forward_mode.is_decode_or_idle():
            extend_seq_lens = extend_prefix_lens = extend_logprob_start_lens = None
        else:
            extend_seq_lens = self.extend_lens
            extend_prefix_lens = self.prefix_lens
            extend_logprob_start_lens = self.extend_logprob_start_lens

        if self.forward_mode.is_decode_or_idle():
            attention_backend_str = global_server_args_dict["decode_attention_backend"]
        else:
            attention_backend_str = global_server_args_dict["prefill_attention_backend"]
        # Create seq_lens_cpu when needed
        if (
            attention_backend_str
            in [
                "fa3",
                "flashinfer",
                "flashmla",
                "cutlass_mla",
                "ascend",
                "trtllm_mha",
                "aiter",
            ]
            or global_server_args_dict["enable_two_batch_overlap"]
        ):
            seq_lens_cpu = (
                seq_lens_cpu_cache
                if seq_lens_cpu_cache is not None
                else self.seq_lens.cpu()
            )
        else:
            seq_lens_cpu = None

        if self.sampling_info:
            if self.has_grammar:
                self.sampling_info.grammars = [req.grammar for req in self.reqs]
            else:
                self.sampling_info.grammars = None

        global bid
        bid += 1
        return ModelWorkerBatch(
            bid=bid,
            forward_mode=self.forward_mode,
            input_ids=self.input_ids,
            req_pool_indices=self.req_pool_indices,
            seq_lens=self.seq_lens,
            orig_seq_lens=self.orig_seq_lens,
            out_cache_loc=self.out_cache_loc,
            seq_lens_cpu=seq_lens_cpu,
            seq_lens_sum=self.seq_lens_sum,
            return_logprob=self.return_logprob,
            top_logprobs_nums=self.top_logprobs_nums,
            token_ids_logprobs=self.token_ids_logprobs,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            is_extend_in_batch=self.is_extend_in_batch,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            tbo_split_seq_index=self.tbo_split_seq_index,
            global_forward_mode=self.global_forward_mode,
            extend_num_tokens=self.extend_num_tokens,
            extend_seq_lens=extend_seq_lens,
            extend_prefix_lens=extend_prefix_lens,
            extend_logprob_start_lens=extend_logprob_start_lens,
            multimodal_inputs=self.multimodal_inputs,
            encoder_cached=self.encoder_cached,
            encoder_lens=self.encoder_lens,
            encoder_lens_cpu=self.encoder_lens_cpu,
            encoder_out_cache_loc=self.encoder_out_cache_loc,
            lora_ids=[req.lora_id for req in self.reqs],
            sampling_info=self.sampling_info,
            input_embeds=self.input_embeds,
            token_type_ids=self.token_type_ids,
            spec_algorithm=self.spec_algorithm,
            spec_info=self.spec_info,
            hicache_consumer_index=self.hicache_consumer_index,
            capture_hidden_mode=(
                CaptureHiddenMode.FULL
                if self.return_hidden_states
                else (
                    getattr(
                        self.spec_info, "capture_hidden_mode", CaptureHiddenMode.NULL
                    )
                    if self.spec_info
                    else CaptureHiddenMode.NULL
                )
            ),
            extend_input_logprob_token_ids=self.extend_input_logprob_token_ids,
            launch_done=self.launch_done,
        )

    def copy(self):
        # Only contains the fields that will be used by process_batch_result
        return ScheduleBatch(
            reqs=self.reqs,
            model_config=self.model_config,
            forward_mode=self.forward_mode,
            out_cache_loc=self.out_cache_loc,
            return_logprob=self.return_logprob,
            decoding_reqs=self.decoding_reqs,
            spec_algorithm=self.spec_algorithm,
            enable_custom_logit_processor=self.enable_custom_logit_processor,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            is_extend_in_batch=self.is_extend_in_batch,
        )

    def _evict_tree_cache_if_needed(
        self,
        num_tokens: int,
    ) -> None:
        if isinstance(self.tree_cache, SWAChunkCache):
            return

        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()

            if full_available_size < num_tokens or swa_available_size < num_tokens:
                if self.tree_cache is not None:
                    full_num_tokens = max(0, num_tokens - full_available_size)
                    swa_num_tokens = max(0, num_tokens - swa_available_size)
                    self.tree_cache.evict(full_num_tokens, swa_num_tokens)
        else:
            if self.token_to_kv_pool_allocator.available_size() < num_tokens:
                if self.tree_cache is not None:
                    self.tree_cache.evict(num_tokens)

    def _is_available_size_sufficient(self, num_tokens: int) -> bool:
        if self.is_hybrid:
            return (
                self.token_to_kv_pool_allocator.full_available_size() >= num_tokens
                and self.token_to_kv_pool_allocator.swa_available_size() >= num_tokens
            )
        else:
            return self.token_to_kv_pool_allocator.available_size() >= num_tokens

    def _available_and_evictable_str(self) -> str:
        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()
            full_evictable_size = self.tree_cache.full_evictable_size()
            swa_evictable_size = self.tree_cache.swa_evictable_size()
            return (
                f"Available full tokens: {full_available_size + full_evictable_size} ({full_available_size=} + {full_evictable_size=})\n"
                f"Available swa tokens: {swa_available_size + swa_evictable_size} ({swa_available_size=} + {swa_evictable_size=})\n"
                f"Full LRU list evictable size: {self.tree_cache.full_lru_list_evictable_size()}\n"
                f"SWA LRU list evictable size: {self.tree_cache.swa_lru_list_evictable_size()}\n"
            )
        else:
            available_size = self.token_to_kv_pool_allocator.available_size()
            evictable_size = self.tree_cache.evictable_size()
            return f"Available tokens: {available_size + evictable_size} ({available_size=} + {evictable_size=})\n"

    def __str__(self):
        return (
            f"ScheduleBatch(forward_mode={self.forward_mode.name if self.forward_mode else 'None'}, "
            f"#req={(len(self.reqs))})"
        )


@dataclasses.dataclass
class ModelWorkerBatch:
    # The batch id
    bid: int
    # The forward mode
    forward_mode: ForwardMode
    # The input ids
    input_ids: torch.Tensor
    # The indices of requests in the req_to_token_pool
    req_pool_indices: torch.Tensor
    # The sequence length
    seq_lens: torch.Tensor
    # The indices of output tokens in the token_to_kv_pool_allocator
    out_cache_loc: torch.Tensor
    # The sequence length tensor on CPU
    seq_lens_cpu: Optional[torch.Tensor]
    seq_lens_sum: int

    # For logprob
    return_logprob: bool
    top_logprobs_nums: Optional[List[int]]
    token_ids_logprobs: Optional[List[List[int]]]

    # For DP attention
    global_num_tokens: Optional[List[int]]
    global_num_tokens_for_logprob: Optional[List[int]]
    is_extend_in_batch: bool
    can_run_dp_cuda_graph: bool
    tbo_split_seq_index: Optional[int]
    global_forward_mode: Optional[ForwardMode]

    # For extend
    extend_num_tokens: Optional[int]
    extend_seq_lens: Optional[List[int]]
    extend_prefix_lens: Optional[List[int]]
    extend_logprob_start_lens: Optional[List[int]]
    extend_input_logprob_token_ids: Optional[torch.Tensor]

    # For multimodal
    multimodal_inputs: Optional[List[MultimodalInputs]]

    # For encoder-decoder
    encoder_cached: Optional[List[bool]]
    encoder_lens: Optional[torch.Tensor]
    encoder_lens_cpu: Optional[List[int]]
    encoder_out_cache_loc: Optional[torch.Tensor]

    # For LoRA
    lora_ids: Optional[List[str]]

    # Sampling info
    sampling_info: SamplingBatchInfo

    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: Optional[torch.Tensor] = None

    # The input Embeds
    input_embeds: Optional[torch.Tensor] = None

    # For cross-encoder models
    token_type_ids: Optional[torch.Tensor] = None

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleVerifyInput, EagleDraftInput]] = None
    # If set, the output of the batch contains the hidden states of the run.
    capture_hidden_mode: CaptureHiddenMode = None
    hicache_consumer_index: int = 0

    # Overlap event
    launch_done: Optional[threading.Event] = None


@triton.jit
def write_req_to_token_pool_triton(
    req_to_token_ptr,  # [max_batch, max_context_len]
    req_pool_indices,
    pre_lens,
    seq_lens,
    extend_lens,
    out_cache_loc,
    req_to_token_ptr_stride: tl.constexpr,
):
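    # One program instance per request: program `pid` copies this request's slice of
    # out_cache_loc (starting at the running sum of the previous requests'
    # extend_lens) into req_to_token at positions [pre_len, seq_len), BLOCK_SIZE
    # elements at a time.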
    BLOCK_SIZE: tl.constexpr = 512
    pid = tl.program_id(0)

    req_pool_index = tl.load(req_pool_indices + pid)
    pre_len = tl.load(pre_lens + pid)
    seq_len = tl.load(seq_lens + pid)

    # NOTE: This can be slow for large bs
    cumsum_start = tl.cast(0, tl.int64)
    for i in range(pid):
        cumsum_start += tl.load(extend_lens + i)

    num_loop = tl.cdiv(seq_len - pre_len, BLOCK_SIZE)
    for i in range(num_loop):
        offset = tl.arange(0, BLOCK_SIZE) + i * BLOCK_SIZE
        mask = offset < (seq_len - pre_len)
        value = tl.load(out_cache_loc + cumsum_start + offset, mask=mask)
        tl.store(
            req_to_token_ptr
            + req_pool_index * req_to_token_ptr_stride
            + offset
            + pre_len,
            value,
            mask=mask,
        )


def get_last_loc(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
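    """Return, for each request, the KV-cache location of its last prefix token,
    or -1 when the request has no cached prefix. Dispatches to the Triton kernel
    unless the attention backend is "ascend" or "torch_native"."""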
    if (
        global_server_args_dict["attention_backend"] != "ascend"
        and global_server_args_dict["attention_backend"] != "torch_native"
    ):
        impl = get_last_loc_triton
    else:
        impl = get_last_loc_torch

    return impl(req_to_token, req_pool_indices_tensor, prefix_lens_tensor)


def get_last_loc_torch(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    return torch.where(
        prefix_lens_tensor > 0,
        req_to_token[req_pool_indices_tensor, prefix_lens_tensor - 1],
        torch.full_like(prefix_lens_tensor, -1),
    )


@triton.jit
def get_last_loc_kernel(
    req_to_token,
    req_pool_indices_tensor,
    prefix_lens_tensor,
    result,
    num_tokens,
    req_to_token_stride,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(0)
    offset = tl.arange(0, BLOCK_SIZE) + pid * BLOCK_SIZE
    mask = offset < num_tokens

    prefix_lens = tl.load(prefix_lens_tensor + offset, mask=mask, other=0)
    req_pool_indices = tl.load(req_pool_indices_tensor + offset, mask=mask, other=0)

    token_mask = prefix_lens > 0
    token_index = req_pool_indices * req_to_token_stride + (prefix_lens - 1)
    tokens = tl.load(req_to_token + token_index, mask=token_mask, other=-1)

    tl.store(result + offset, tokens, mask=mask)


def get_last_loc_triton(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    BLOCK_SIZE = 256
    num_tokens = prefix_lens_tensor.shape[0]
    result = torch.empty_like(prefix_lens_tensor)
    grid = (triton.cdiv(num_tokens, BLOCK_SIZE),)

    get_last_loc_kernel[grid](
        req_to_token,
        req_pool_indices_tensor,
        prefix_lens_tensor,
        result,
        num_tokens,
        req_to_token.stride(0),
        BLOCK_SIZE,
    )
    return result