"vscode:/vscode.git/clone" did not exist on "8340e19daad4bafe24125150a8c56161838086fa"
schedule_batch.py 75.3 KB
Newer Older
1
2
from __future__ import annotations

# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Store information about requests and batches.

The following is the flow of data structures for a batch:

ScheduleBatch -> ModelWorkerBatch -> ForwardBatch

- ScheduleBatch is managed by `scheduler.py::Scheduler`.
  It contains high-level scheduling data. Most of the data is on the CPU.
- ModelWorkerBatch is managed by `tp_worker.py::TpModelWorker`.
26
27
  It is a subset of `ScheduleBatch` that only contains data related to the model forward on GPU.
  It will be transformed from CPU scheduler to GPU model runner.
28
29
- ForwardBatch is managed by `model_runner.py::ModelRunner`.
  It contains low-level tensor data. Most of the data consists of GPU tensors.
Lianmin Zheng's avatar
Lianmin Zheng committed
30
31

TODO(lmzheng): ModelWorkerBatch seems a bit redundant and we consider removing it in the future.
32
"""

import copy
import dataclasses
import logging
import threading
from enum import Enum, auto
from http import HTTPStatus
from itertools import chain
from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple, Union

import numpy as np
import torch
import triton
import triton.language as tl

from sglang.global_config import global_config
from sglang.srt.constrained.base_grammar_backend import BaseGrammarObject
from sglang.srt.disaggregation.base import BaseKVSender
from sglang.srt.disaggregation.decode_schedule_batch_mixin import (
    ScheduleBatchDisaggregationDecodeMixin,
)
from sglang.srt.distributed.parallel_state import get_tensor_model_parallel_rank
from sglang.srt.layers.moe import is_tbo_enabled
from sglang.srt.mem_cache.allocator import (
    BaseTokenToKVPoolAllocator,
    SWATokenToKVPoolAllocator,
)
from sglang.srt.mem_cache.base_prefix_cache import BasePrefixCache
from sglang.srt.mem_cache.chunk_cache import ChunkCache, SWAChunkCache
from sglang.srt.mem_cache.lora_radix_cache import LoRAKey, LoRARadixCache
from sglang.srt.mem_cache.memory_pool import ReqToTokenPool
from sglang.srt.mem_cache.swa_radix_cache import SWARadixCache
from sglang.srt.metrics.collector import TimeStats
from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode, ForwardMode
from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
from sglang.srt.sampling.sampling_params import SamplingParams
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import flatten_nested_list, support_triton

if TYPE_CHECKING:
    from sglang.srt.configs.model_config import ModelConfig
    from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
    from sglang.srt.speculative.spec_info import SpeculativeAlgorithm

INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5

GLOBAL_SERVER_ARGS_KEYS = [
    "attention_backend",
    "mm_attention_backend",
    "debug_tensor_dump_inject",
    "debug_tensor_dump_output_folder",
    "chunked_prefill_size",
    "device",
    "disable_chunked_prefix_cache",
    "disable_flashinfer_cutlass_moe_fp4_allgather",
    "disable_radix_cache",
    "enable_dp_lm_head",
    "enable_flashinfer_allreduce_fusion",
    "moe_dense_tp_size",
    "ep_dispatch_algorithm",
    "ep_num_redundant_experts",
    "enable_nan_detection",
    "flashinfer_mla_disable_ragged",
    "max_micro_batch_size",
    "disable_shared_experts_fusion",
    "sampling_backend",
    "speculative_accept_threshold_single",
    "speculative_accept_threshold_acc",
    "torchao_config",
    "triton_attention_reduce_in_fp32",
    "num_reserved_decode_tokens",
    "weight_loader_disable_mmap",
    "enable_multimodal",
    "enable_symm_mem",
    "quantization",
    "enable_custom_logit_processor",
]

# Put some global args for easy access
global_server_args_dict = {k: getattr(ServerArgs, k) for k in GLOBAL_SERVER_ARGS_KEYS}
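# Note (illustrative): this dict starts from the ServerArgs dataclass defaults; the scheduler
# is expected to overwrite these entries with the actual server arguments at startup
# (see scheduler.py for the authoritative behavior).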

logger = logging.getLogger(__name__)


class BaseFinishReason:
    def __init__(self, is_error: bool = False):
        self.is_error = is_error

    def to_json(self):
        raise NotImplementedError()


class FINISH_MATCHED_TOKEN(BaseFinishReason):
    def __init__(self, matched: Union[int, List[int]]):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_MATCHED_STR(BaseFinishReason):
    def __init__(self, matched: str):
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }


class FINISH_LENGTH(BaseFinishReason):
    def __init__(self, length: int):
        super().__init__()
        self.length = length

    def to_json(self):
        return {
            "type": "length",  # to match OpenAI API's return value
            "length": self.length,
        }


class FINISH_ABORT(BaseFinishReason):
    def __init__(self, message=None, status_code=None, err_type=None):
        super().__init__(is_error=True)
        self.message = message or "Aborted"
        self.status_code = status_code
        self.err_type = err_type

    def to_json(self):
        return {
            "type": "abort",
            "message": self.message,
            "status_code": self.status_code,
            "err_type": self.err_type,
        }

class Modality(Enum):
    IMAGE = auto()
    MULTI_IMAGES = auto()
    VIDEO = auto()
    AUDIO = auto()

    @staticmethod
    def from_str(modality_str: str):
        try:
            return Modality[modality_str.upper()]
        except KeyError:
            raise ValueError(
                f"Invalid modality string: {modality_str}. Valid modalities are: {[m.name for m in Modality]}"
            )

    @staticmethod
    def all():
        return [Modality.IMAGE, Modality.VIDEO, Modality.AUDIO]


@dataclasses.dataclass
class MultimodalDataItem:
    """
    One MultimodalDataItem contains all inputs for one modality.
    For example, if there are 3 image inputs and 1 audio input, there will be 2 MultimodalDataItems:
    one for the images and one for the audio.

    We put the common fields first and the model-specific fields in model_specific_data.
    """

    modality: Modality
    hash: int = None
    pad_value: int = None
    offsets: Optional[list] = None

    # the raw features returned by processor, e.g. pixel_values or audio_features
    feature: Union[torch.Tensor, np.ndarray] = None
    # the precomputed embeddings, passed as final encoder embeddings
    # Exactly one of feature and precomputed_embeddings will be empty; the other is set
    precomputed_embeddings: Optional[Union[torch.Tensor, np.ndarray]] = None

    # Model-specific data stored in a dictionary
    model_specific_data: dict[str, Any] = dataclasses.field(default_factory=dict)

    def __getattr__(self, name: str):
        if (
            "model_specific_data" in self.__dict__
            and name in self.__dict__["model_specific_data"]
        ):
            return self.__dict__["model_specific_data"][name]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )

    def __setitem__(self, key: str, value: Any):
        if key in self.__dict__:
            self.__dict__[key] = value
        else:
            self.model_specific_data[key] = value

    def set(self, key: str, value: Any):
        self.__setitem__(key, value)
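
    # Illustrative usage (hypothetical field name): item.set("image_grid_thw", thw) stores the
    # value in model_specific_data, and a later item.image_grid_thw read resolves through
    # __getattr__ above; unknown attributes still raise AttributeError.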

    @staticmethod
    def is_empty_list(l):
        if l is None:
            return True
        return len([item for item in flatten_nested_list(l) if item is not None]) == 0

    def set_pad_value(self):
        """
        Set the pad value after first hashing the data
        """
        from sglang.srt.managers.mm_utils import hash_feature

        if self.hash is None:
            if self.feature is not None:
                hashed_feature = self.feature
            else:
                hashed_feature = self.precomputed_embeddings
            self.hash = hash_feature(hashed_feature)
        assert self.hash is not None
        self.pad_value = self.hash % (1 << 30)
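        # Note (illustrative): the hash-derived pad_value is used to fill the multimodal token
        # positions in input_ids, so that prefix caching can distinguish requests whose text is
        # identical but whose image/audio contents differ; see mm_utils for the authoritative logic.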

    def is_modality(self, modality: Modality) -> bool:
        return self.modality == modality

    def is_audio(self):
        return self.modality == Modality.AUDIO

    def is_image(self):
        return self.modality in [Modality.IMAGE, Modality.MULTI_IMAGES]

    def is_video(self):
        return self.modality == Modality.VIDEO

    def is_valid(self) -> bool:
        return self.is_image() or self.is_video() or self.is_audio()

    def validate(self):
        ...
        # TODO

    @staticmethod
    def from_dict(obj: dict):
        kwargs = dict(obj)
        modality = kwargs.pop("modality")
        if isinstance(modality, str):
            modality = Modality[modality]
        ret = MultimodalDataItem(modality=modality, **kwargs)
        ret.validate()
        return ret

    def merge(self, other):
        self.feature += other.feature
        self.offsets += other.offsets
        self.hash = hash((self.hash, other.hash))
        self.set_pad_value()


@dataclasses.dataclass
class MultimodalInputs:
    """The multimodal data related inputs."""

    # items of data
    mm_items: List[MultimodalDataItem]
    image_pad_len: Optional[list] = None
    num_image_tokens: Optional[int] = None

    # image
    im_token_id: Optional[int] = None
    im_start_id: Optional[int] = None
    im_end_id: Optional[int] = None
    slice_start_id: Optional[int] = None
    slice_end_id: Optional[int] = None

    # video
    video_token_id: Optional[int] = None

    # audio
    audio_token_id: Optional[int] = None
    audio_start_id: Optional[int] = None
    audio_end_id: Optional[int] = None

    # QWen2-VL related
    mrope_positions: Optional[torch.Tensor] = None
    mrope_position_delta: Optional[torch.Tensor] = None

    @staticmethod
    def from_dict(obj: dict):
        ret = MultimodalInputs(
            mm_items=obj["mm_items"],
        )

        assert isinstance(ret.mm_items, list)
        ret.mm_items = [item for item in ret.mm_items if item.is_valid()]
        for item in ret.mm_items:
            item.set_pad_value()

        optional_args = [
            "mrope_positions",
            "mrope_position_delta",
            "im_token_id",
            "im_start_id",
            "im_end_id",
            "video_token_id",
            "slice_start_id",
            "slice_end_id",
            "audio_start_id",
            "audio_end_id",
            "audio_token_id",
        ]
        for arg in optional_args:
            if arg in obj:
                setattr(ret, arg, obj[arg])

        return ret

    def contains_image_inputs(self) -> bool:
        return any(item.is_image() for item in self.mm_items)

    def contains_video_inputs(self) -> bool:
        return any(item.is_video() for item in self.mm_items)

    def contains_audio_inputs(self) -> bool:
        return any(item.is_audio() for item in self.mm_items)

    def contains_mm_input(self) -> bool:
        return any(True for item in self.mm_items if item.is_valid())

    def merge(self, other: MultimodalInputs):
        """
        merge image inputs when requests are being merged
        """

        # args needed to be merged
        optional_args = [
            "mm_items",
            "image_pad_len",
        ]
        for arg in optional_args:
            self_arg = getattr(self, arg, None)
            if self_arg is not None:
                setattr(self, arg, self_arg + getattr(other, arg))

        mrope_positions = self.mrope_positions
        if mrope_positions is not None:
            if other.mrope_positions is None:
                self.mrope_positions = mrope_positions
            else:
                self.mrope_positions = torch.cat(
                    [self.mrope_positions, other.mrope_positions], dim=1
                )

        mrope_position_delta = self.mrope_position_delta
        if mrope_position_delta is not None:
            if other.mrope_position_delta is None:
                self.mrope_position_delta = mrope_position_delta
            else:
                self.mrope_position_delta = torch.cat(
                    [self.mrope_position_delta, other.mrope_position_delta], dim=0
                )

        for key, val in other.__dict__.items():
            if "_id" in key:
                # set token_ids
                if getattr(self, key, None) is None:
                    setattr(self, key, getattr(other, key, None))
        # other args would be kept intact


class Req:
    """The input and output status of a request."""

    def __init__(
        self,
        rid: str,
        origin_input_text: str,
        origin_input_ids: List[int],
        sampling_params: SamplingParams,
        return_logprob: bool = False,
        top_logprobs_num: int = 0,
        token_ids_logprob: List[int] = None,
        stream: bool = False,
        origin_input_ids_unpadded: Optional[Tuple[int]] = None,
        lora_id: Optional[str] = None,
        input_embeds: Optional[List[List[float]]] = None,
        token_type_ids: List[int] = None,
        session_id: Optional[str] = None,
        custom_logit_processor: Optional[str] = None,
        return_hidden_states: bool = False,
        eos_token_ids: Optional[Set[int]] = None,
        bootstrap_host: Optional[str] = None,
        bootstrap_port: Optional[int] = None,
        bootstrap_room: Optional[int] = None,
        data_parallel_rank: Optional[int] = None,
        vocab_size: Optional[int] = None,
    ):
        # Input and output info
        self.rid = rid
        self.origin_input_text = origin_input_text
        self.origin_input_ids_unpadded = (
            origin_input_ids_unpadded
            if origin_input_ids_unpadded
            else origin_input_ids  # Before image padding
        )
        self.origin_input_ids = origin_input_ids
        # Each decode stage's output ids
        self.output_ids = []
        # fill_ids = origin_input_ids + output_ids. Updated if chunked.
        self.fill_ids = []
        self.session_id = session_id
        self.input_embeds = input_embeds

        # for cross-encoder models
        self.token_type_ids = token_type_ids

        # The length of the KV cache that has been removed during local attention chunked prefill
        self.evicted_seqlen_local = 0

        # Sampling info
        if isinstance(sampling_params.custom_params, dict):
            sampling_params = copy.copy(sampling_params)
            sampling_params.custom_params = sampling_params.custom_params | {
                "__req__": self
            }
        self.sampling_params = sampling_params
        self.custom_logit_processor = custom_logit_processor
        self.return_hidden_states = return_hidden_states
        self.lora_id = lora_id

        # Memory pool info
        self.req_pool_idx: Optional[int] = None

        # Check finish
        self.tokenizer = None
        self.finished_reason = None
        # Whether this request has finished output
        self.finished_output = None
        # If we want to abort the request in the middle of the event loop, set this to true
        # Note: we should never set finished_reason in the middle of the event loop; otherwise the req will get filtered out and never respond
        self.to_abort = False
        # This carries the error message for `.to_abort` and will be attached to the finished_reason at the end of the event loop
        self.to_abort_message: str = None
        self.stream = stream
        self.eos_token_ids = eos_token_ids
        self.vocab_size = vocab_size

        # For incremental decoding
        # ----- | --------- read_ids -------|
        # ----- |   surr_ids  |
        # xxxxx | xxxxxxxxxxx | xxxxxxxxxxx |
        # ----- ^ ----------- ^ ----------- ^
        # ----- 1 ----------- 2 ----------- 3
        # 1: surr_offset
        # 2: read_offset
        # 3: last token
        self.surr_offset = None  # Surrounding offset to defeat the cleanup algorithm
        self.read_offset = None
        self.decoded_text = ""

        # For multimodal inputs
        self.multimodal_inputs: Optional[MultimodalInputs] = None

        # Prefix info
        # The indices to kv cache for the shared prefix.
        self.prefix_indices: torch.Tensor = []
        # Number of tokens to run prefill.
        self.extend_input_len = 0
        # The relative logprob_start_len in an extend batch
        self.extend_logprob_start_len = 0
        self.last_node: Any = None
        self.last_host_node: Any = None
        self.host_hit_length = 0
        # The node to lock until for swa radix tree lock ref
        self.swa_uuid_for_lock: Optional[int] = None

        # Whether it is chunked. It increments whenever
        # the request is chunked, and decrements whenever the chunked request is
        # processed.
        self.is_chunked = 0

        # For retraction
        self.is_retracted = False

        # Incremental streaming
        self.send_token_offset: int = 0
        self.send_decode_id_offset: int = 0
        # TODO (Byron): send_output_token_logprobs_offset and send_decode_id_offset can be different in disaggregation mode
        # because the decode server does not have the first output token logprobs
        self.send_output_token_logprobs_offset: int = 0

        # Logprobs (arguments)
        self.return_logprob = return_logprob
        # Start index to compute logprob from.
        self.logprob_start_len = 0
        self.top_logprobs_num = top_logprobs_num
        self.token_ids_logprob = token_ids_logprob
        self.temp_scaled_logprobs = False
        self.top_p_normalized_logprobs = False

        # Logprobs (return values)
        # True means the input logprob has been already sent to detokenizer.
        self.input_logprob_sent: bool = False
        self.input_token_logprobs_val: Optional[List[float]] = None
        self.input_token_logprobs_idx: Optional[List[int]] = None
        self.input_top_logprobs_val: Optional[List[float]] = None
        self.input_top_logprobs_idx: Optional[List[int]] = None
        self.input_token_ids_logprobs_val: Optional[List[float]] = None
        self.input_token_ids_logprobs_idx: Optional[List[int]] = None
        # Temporary holder to store input_token_logprobs.
        self.input_token_logprobs: Optional[List[Tuple[int]]] = None
        self.temp_input_top_logprobs_val: Optional[List[torch.Tensor]] = None
        self.temp_input_top_logprobs_idx: Optional[List[int]] = None
        self.temp_input_token_ids_logprobs_val: Optional[List[float]] = None
        self.temp_input_token_ids_logprobs_idx: Optional[List[int]] = None

        if return_logprob:
            # shape: (bs, 1)
            self.output_token_logprobs_val = []
            self.output_token_logprobs_idx = []
            # shape: (bs, k)
            self.output_top_logprobs_val = []
            self.output_top_logprobs_idx = []
            self.output_token_ids_logprobs_val = []
            self.output_token_ids_logprobs_idx = []
        else:
            self.output_token_logprobs_val = self.output_token_logprobs_idx = (
                self.output_top_logprobs_val
            ) = self.output_top_logprobs_idx = self.output_token_ids_logprobs_val = (
                self.output_token_ids_logprobs_idx
            ) = None
        self.hidden_states: List[List[float]] = []
        self.hidden_states_tensor = None  # Note: use tensor instead of list to transfer hidden_states when PD + MTP

        # Embedding (return values)
        self.embedding = None

        # Constrained decoding
        self.grammar: Optional[BaseGrammarObject] = None
        self.grammar_wait_ct = 0

        # The number of tokens that were already cached in the KV cache
        self.cached_tokens = 0
        self.already_computed = 0

        # The number of verification forward passes in the speculative decoding.
        # This is used to compute the average acceptance length per request.
        self.spec_verify_ct = 0

        # For metrics
        self.time_stats: TimeStats = TimeStats()
        self.has_log_time_stats: bool = False
        self.queue_time_start = None
        self.queue_time_end = None

        # For disaggregation
        self.bootstrap_host: str = bootstrap_host
        self.bootstrap_port: Optional[int] = bootstrap_port
        self.bootstrap_room: Optional[int] = bootstrap_room
        self.disagg_kv_sender: Optional[BaseKVSender] = None

        # For data parallel rank routing
        self.data_parallel_rank: Optional[int] = data_parallel_rank

        # the start index of the sent kv cache
        # We want to send it chunk by chunk for chunked prefill.
        # After every chunk forward, we do the following:
        # kv_send(req.input_ids[req.start_send_idx:len(req.fill_ids)])
        # start_send_idx = len(req.fill_ids)
        self.start_send_idx: int = 0

        # For the overlap schedule, we delay the kv transfer until `process_batch_result_disagg_prefill`,
        # rather than `process_prefill_chunk` as in the non-overlap case.
        # This is because kv is not ready in `process_prefill_chunk`.
        # We use `tmp_end_idx` to store the end index of the kv cache to send.
        self.tmp_end_idx: int = -1
        self.metadata_buffer_index: int = -1

    @property
    def seqlen(self):
        return len(self.origin_input_ids) + len(self.output_ids)

    def extend_image_inputs(self, image_inputs):
        if self.multimodal_inputs is None:
            self.multimodal_inputs = image_inputs
        else:
            self.multimodal_inputs.merge(image_inputs)

    def finished(self) -> bool:
        # Whether the request has reached a finish condition
        return self.finished_reason is not None

    def init_next_round_input(
        self,
        tree_cache: Optional[BasePrefixCache] = None,
    ):
        self.fill_ids = self.origin_input_ids + self.output_ids
        if tree_cache is not None:
            if isinstance(tree_cache, LoRARadixCache):
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix_with_lora_id(
                    key=LoRAKey(
                        lora_id=self.lora_id, token_ids=self.adjust_max_prefix_ids()
                    ),
                )
            else:
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix(
                    key=self.adjust_max_prefix_ids(),
                )
        self.extend_input_len = len(self.fill_ids) - len(self.prefix_indices)

    def adjust_max_prefix_ids(self):
        self.fill_ids = self.origin_input_ids + self.output_ids
        input_len = len(self.fill_ids)

        # FIXME: To work around some bugs in logprob computation, we need to ensure each
        # request has at least one token. Later, we can relax this requirement and use `input_len`.
        max_prefix_len = input_len - 1

        if self.sampling_params.max_new_tokens > 0:
            # Need at least one token to compute logits
            max_prefix_len = min(max_prefix_len, input_len - 1)

        if self.return_logprob:
            max_prefix_len = min(max_prefix_len, self.logprob_start_len)

        max_prefix_len = max(max_prefix_len, 0)
        return self.fill_ids[:max_prefix_len]

    # Based on https://github.com/vllm-project/vllm/blob/7a64d24aad69e4d2548aa0bf528d9fe63428ab01/vllm/transformers_utils/detokenizer.py#L194-L313
    def init_incremental_detokenize(self):
        first_iter = self.surr_offset is None or self.read_offset is None

        if first_iter:
            self.read_offset = len(self.origin_input_ids_unpadded)
            self.surr_offset = max(
                self.read_offset - INIT_INCREMENTAL_DETOKENIZATION_OFFSET, 0
            )

        all_ids = self.origin_input_ids_unpadded + self.output_ids
        return all_ids[self.surr_offset :], self.read_offset - self.surr_offset
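
    # Illustrative walk-through (not executed): with 10 unpadded input ids and no output yet,
    # read_offset starts at 10 and surr_offset at max(10 - INIT_INCREMENTAL_DETOKENIZATION_OFFSET, 0) = 5,
    # so the detokenizer re-decodes a small surrounding window to keep incremental decoding stable.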

    def check_finished(self):
        if self.finished():
            return

        if self.to_abort:
            self.finished_reason = FINISH_ABORT(
                message=self.to_abort_message,
            )
            return

        if len(self.output_ids) >= self.sampling_params.max_new_tokens:
            self.finished_reason = FINISH_LENGTH(
                length=self.sampling_params.max_new_tokens
            )
            return

        if self.grammar is not None:
            if self.grammar.is_terminated():
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=self.output_ids[-1])
                return

        last_token_id = self.output_ids[-1]

        if not self.sampling_params.ignore_eos:
            matched_eos = False

            # Check stop token ids
            if self.sampling_params.stop_token_ids:
                matched_eos = last_token_id in self.sampling_params.stop_token_ids
            if self.eos_token_ids:
                matched_eos |= last_token_id in self.eos_token_ids
            if self.tokenizer is not None:
                matched_eos |= last_token_id == self.tokenizer.eos_token_id
                if self.tokenizer.additional_stop_token_ids:
                    matched_eos |= (
                        last_token_id in self.tokenizer.additional_stop_token_ids
                    )
            if matched_eos:
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=last_token_id)
                return

        if last_token_id > self.vocab_size or last_token_id < 0:
            if self.sampling_params.stop_token_ids:
                self.output_ids[-1] = next(iter(self.sampling_params.stop_token_ids))
            if self.eos_token_ids:
                self.output_ids[-1] = next(iter(self.eos_token_ids))
            self.finished_reason = FINISH_MATCHED_STR(matched="NaN happened")
            return

        # Check stop strings
        if len(self.sampling_params.stop_strs) > 0:
            tail_str = self.tokenizer.decode(
                self.output_ids[-(self.sampling_params.stop_str_max_len + 1) :]
            )

            for stop_str in self.sampling_params.stop_strs:
                if stop_str in tail_str or stop_str in self.decoded_text:
                    self.finished_reason = FINISH_MATCHED_STR(matched=stop_str)
                    return

    def reset_for_retract(self):
        self.prefix_indices = []
        self.last_node = None
        self.swa_uuid_for_lock = None
        self.extend_input_len = 0
        self.is_retracted = True
        self.input_token_logprobs = None
        self.temp_input_top_logprobs_val = None
        self.temp_input_top_logprobs_idx = None
        self.extend_logprob_start_len = 0
        self.is_chunked = 0
        self.req_pool_idx = None
        self.already_computed = 0

    def offload_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        self.kv_cache_cpu = token_to_kv_pool_allocator.get_cpu_copy(token_indices)

    def load_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        token_to_kv_pool_allocator.load_cpu_copy(self.kv_cache_cpu, token_indices)
        del self.kv_cache_cpu

    def log_time_stats(self):
        # If overlap schedule, we schedule one decode batch ahead so this gets called twice.
        if self.has_log_time_stats is True:
            return

        if self.bootstrap_room is not None:
            prefix = f"Req Time Stats(rid={self.rid}, bootstrap_room={self.bootstrap_room}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        else:
            prefix = f"Req Time Stats(rid={self.rid}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        logger.info(f"{prefix}: {self.time_stats}")
        self.has_log_time_stats = True

    def set_finish_with_abort(self, error_msg: str):
        if get_tensor_model_parallel_rank() == 0:
            logger.error(f"{error_msg}, {self.rid=}")
        self.multimodal_inputs = None
        self.grammar = None
        self.origin_input_ids = [0]  # set it to one token to skip the long prefill
        self.return_logprob = False
        self.finished_reason = FINISH_ABORT(
            error_msg, HTTPStatus.BAD_REQUEST, "BadRequestError"
        )

    def __repr__(self):
        return (
            f"Req(rid={self.rid}, "
            f"input_ids={self.origin_input_ids}, output_ids={self.output_ids}, "
            f"{self.grammar=}, "
            f"{self.sampling_params=})"
        )


# Batch id
bid = 0


@dataclasses.dataclass
class ScheduleBatch(ScheduleBatchDisaggregationDecodeMixin):
    """Store all information of a batch on the scheduler."""

    # Request, memory pool, and cache
    reqs: List[Req]
    req_to_token_pool: ReqToTokenPool = None
    token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator = None
    tree_cache: BasePrefixCache = None
    is_hybrid: bool = False

    # Batch configs
    model_config: ModelConfig = None
    forward_mode: ForwardMode = None
    enable_overlap: bool = False
    # Tell whether the current running batch is full so that we can skip
    # the check of whether to prefill new requests.
    # This is an optimization to reduce the overhead of the prefill check.
    batch_is_full: bool = False

    # Events
    launch_done: Optional[threading.Event] = None

    # For chunked prefill in PP
    chunked_req: Optional[Req] = None

    # Sampling info
    sampling_info: SamplingBatchInfo = None
    next_batch_sampling_info: SamplingBatchInfo = None

    # Batched arguments to model runner
    input_ids: torch.Tensor = None  # shape: [b], int64
    input_embeds: torch.Tensor = None  # shape: [b, hidden_size], float32
    token_type_ids: torch.Tensor = None  # shape: [b], int64
    req_pool_indices: torch.Tensor = None  # shape: [b], int64
    seq_lens: torch.Tensor = None  # shape: [b], int64
    # The output locations of the KV cache
    out_cache_loc: torch.Tensor = None  # shape: [b], int64
    output_ids: torch.Tensor = None  # shape: [b], int64

    # For multimodal inputs
    multimodal_inputs: Optional[List] = None

    # The sum of all sequence lengths
    seq_lens_sum: int = None
    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: torch.Tensor = None  # shape: [b], int32

    # For DP attention
    global_num_tokens: Optional[List[int]] = None
    global_num_tokens_for_logprob: Optional[List[int]] = None
    is_extend_in_batch: bool = False
    can_run_dp_cuda_graph: bool = False
    tbo_split_seq_index: Optional[int] = None
    global_forward_mode: Optional[ForwardMode] = None

    # For processing logprobs
    return_logprob: bool = False
    top_logprobs_nums: Optional[List[int]] = None
    token_ids_logprobs: Optional[List[List[int]]] = None

    # For logits and logprob post processing
    temp_scaled_logprobs: bool = False
    top_p_normalized_logprobs: bool = False

    # For extend and mixed chunked prefill
    prefix_lens: List[int] = None
    extend_lens: List[int] = None
    extend_num_tokens: Optional[int] = None
    decoding_reqs: List[Req] = None
    extend_logprob_start_lens: List[int] = None
    # It is an empty list if logprob is not required.
    extend_input_logprob_token_ids: Optional[torch.Tensor] = None

    # For encoder-decoder architectures
    encoder_cached: Optional[List[bool]] = None
    encoder_lens: Optional[torch.Tensor] = None
    encoder_lens_cpu: Optional[List[int]] = None
    encoder_out_cache_loc: Optional[torch.Tensor] = None

    # Stream
    has_stream: bool = False

    # Has grammar
    has_grammar: bool = False

    # Device
    device: str = "cuda"

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]] = None

    # Whether to return hidden states
    return_hidden_states: bool = False

    # Whether this batch is prefill-only (no token generation needed)
    is_prefill_only: bool = False

    # hicache pointer for synchronizing data loading from CPU to GPU
    hicache_consumer_index: int = 0

    @classmethod
    def init_new(
        cls,
        reqs: List[Req],
        req_to_token_pool: ReqToTokenPool,
        token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator,
        tree_cache: BasePrefixCache,
        model_config: ModelConfig,
        enable_overlap: bool,
        spec_algorithm: SpeculativeAlgorithm,
        chunked_req: Optional[Req] = None,
    ):
        return_logprob = any(req.return_logprob for req in reqs)

        is_hybrid = False
        if isinstance(token_to_kv_pool_allocator, SWATokenToKVPoolAllocator):
            assert (
                tree_cache is None
                or isinstance(tree_cache, SWARadixCache)
                or isinstance(tree_cache, SWAChunkCache)
            ), "SWARadixCache or SWAChunkCache is required for SWATokenToKVPoolAllocator"
            is_hybrid = True

        return cls(
            reqs=reqs,
            req_to_token_pool=req_to_token_pool,
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
            tree_cache=tree_cache,
            is_hybrid=is_hybrid,
            model_config=model_config,
            enable_overlap=enable_overlap,
            return_logprob=return_logprob,
            has_stream=any(req.stream for req in reqs),
            has_grammar=any(req.grammar for req in reqs),
            device=req_to_token_pool.device,
            spec_algorithm=spec_algorithm,
            return_hidden_states=any(req.return_hidden_states for req in reqs),
            is_prefill_only=all(
                req.sampling_params.max_new_tokens == 0 for req in reqs
            ),
            chunked_req=chunked_req,
        )

    def batch_size(self):
        return len(self.reqs)

    def is_empty(self):
        return len(self.reqs) == 0

    def alloc_req_slots(self, num_reqs: int):
        req_pool_indices = self.req_to_token_pool.alloc(num_reqs)
        if req_pool_indices is None:
            raise RuntimeError(
                "alloc_req_slots runs out of memory. "
                "Please set a smaller number for `--max-running-requests`. "
                f"{self.req_to_token_pool.available_size()=}, "
                f"{num_reqs=}, "
            )
        return req_pool_indices

    def alloc_token_slots(self, num_tokens: int, backup_state: bool = False):
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc(num_tokens)
        if out_cache_loc is None:
            phase_str = "Prefill" if self.forward_mode.is_extend() else "Decode"
            error_msg = (
                f"{phase_str} out of memory. Try to lower your batch size.\n"
                f"Try to allocate {num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            if self.tree_cache is not None:
                self.tree_cache.pretty_print()
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_extend(
        self,
        prefix_lens: torch.Tensor,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        extend_num_tokens: int,
        backup_state: bool = False,
    ):
        # Overestimate the number of tokens: assume each request needs a new page.
        num_tokens = (
            extend_num_tokens
            + len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        )
        self._evict_tree_cache_if_needed(num_tokens)
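
        # Worked example (illustrative numbers): with page_size 16, 3 requests, and
        # extend_num_tokens 100, we conservatively reserve 100 + 3 * 16 = 148 token slots
        # before allocating, since each request may have to open a new page.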

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_extend(
            prefix_lens, seq_lens, last_loc, extend_num_tokens
        )
        if out_cache_loc is None:
            error_msg = (
                f"Prefill out of memory. Try to lower your batch size.\n"
                f"Try to allocate {extend_num_tokens} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_decode(
        self,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        backup_state: bool = False,
    ):
        # Overestimate the number of tokens: assume each request needs a new page.
        num_tokens = len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        self._evict_tree_cache_if_needed(num_tokens)

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_decode(seq_lens, last_loc)
        if out_cache_loc is None:
            error_msg = (
                f"Decode out of memory. Try to lower your batch size.\n"
                f"Try to allocate {len(seq_lens)} tokens.\n"
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def prepare_encoder_info_extend(self, input_ids: List[int], seq_lens: List[int]):
        self.encoder_lens_cpu = []
        self.encoder_cached = []

        for req in self.reqs:
            im = req.multimodal_inputs
            if im is None or im.num_image_tokens is None:
                # No image input
                self.encoder_lens_cpu.append(0)
                self.encoder_cached.append(True)
            else:
                self.encoder_lens_cpu.append(im.num_image_tokens)
                self.encoder_cached.append(
                    self.forward_mode.is_decode()
                    or len(req.prefix_indices) >= im.num_image_tokens
                )

        self.encoder_lens = torch.tensor(self.encoder_lens_cpu, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        # Strip encoder infos
        pt = 0
        decoder_out_cache_loc = []
        encoder_out_cache_loc = []
        for i, req in enumerate(self.reqs):
            encoder_len = self.encoder_lens_cpu[i]
            seq_lens[i] -= encoder_len

            if len(req.prefix_indices) < encoder_len:
                # NOTE: the encoder part should be considered as a whole
                assert len(req.prefix_indices) == 0
                input_ids[i] = input_ids[i][encoder_len:]
                encoder_out_cache_loc.append(self.out_cache_loc[pt : pt + encoder_len])
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt + encoder_len : pt + req.extend_input_len]
                )
                self.extend_lens[i] -= encoder_len
                self.extend_num_tokens -= encoder_len
            else:
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt : pt + req.extend_input_len]
                )
                self.prefix_lens[i] -= encoder_len

            pt += req.extend_input_len

        # Reassign
        self.input_ids = torch.tensor(sum(input_ids, []), dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        self.seq_lens = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if not decoder_out_cache_loc:
            self.out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.out_cache_loc = torch.cat(decoder_out_cache_loc)

        if not encoder_out_cache_loc:
            self.encoder_out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.encoder_out_cache_loc = torch.cat(encoder_out_cache_loc)

        assert (
            len(self.out_cache_loc) == self.extend_num_tokens
        ), f"Expected {len(self.out_cache_loc)}, got {self.extend_num_tokens}"

    def prepare_for_extend(self):
        self.forward_mode = ForwardMode.EXTEND

        # Allocate req slots
        bs = len(self.reqs)
        req_pool_indices = self.alloc_req_slots(bs)

        # Init tensors
        reqs = self.reqs
        input_ids = [r.fill_ids[len(r.prefix_indices) :] for r in reqs]
        extend_num_tokens = sum(len(ids) for ids in input_ids)
        seq_lens = [len(r.fill_ids) for r in reqs]
        orig_seq_lens = [max(len(r.fill_ids), len(r.origin_input_ids)) for r in reqs]
        prefix_lens = [len(r.prefix_indices) for r in reqs]
        extend_lens = [r.extend_input_len for r in reqs]

        token_type_ids = [
            r.token_type_ids for r in reqs if r.token_type_ids is not None
        ]

        req_pool_indices_tensor = torch.tensor(req_pool_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        input_ids_tensor = torch.tensor(
            list(chain.from_iterable(input_ids)), dtype=torch.int64
        ).to(self.device, non_blocking=True)
        seq_lens_tensor = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
        orig_seq_lens_tensor = torch.tensor(orig_seq_lens, dtype=torch.int32).to(
            self.device, non_blocking=True
        )
        prefix_lens_tensor = torch.tensor(
            prefix_lens, dtype=torch.int64, device=self.device
        )

        token_type_ids_tensor = None
        if len(token_type_ids) > 0:
            token_type_ids_tensor = torch.tensor(
                sum(token_type_ids, []), dtype=torch.int64
            ).to(self.device, non_blocking=True)

        extend_lens_tensor = seq_lens_tensor - prefix_lens_tensor

        # Copy prefix and do some basic check
        input_embeds = []
        extend_input_logprob_token_ids = []
        multimodal_inputs = []

        for i, (req, seq_len, pre_len) in enumerate(zip(reqs, seq_lens, prefix_lens)):
            req.req_pool_idx = req_pool_indices[i]
            assert seq_len - pre_len == req.extend_input_len

            if pre_len > 0:
                self.req_to_token_pool.write(
                    (req.req_pool_idx, slice(0, pre_len)), req.prefix_indices
                )
                if isinstance(self.tree_cache, SWAChunkCache):
                    self.tree_cache.evict_swa(
                        req, pre_len, self.model_config.attention_chunk_size
                    )

            # If input_embeds are available, store them
            if req.input_embeds is not None:
                # If req.input_embeds is already a list, append its content directly
                input_embeds.extend(req.input_embeds)  # Use extend to avoid nesting

            multimodal_inputs.append(req.multimodal_inputs)

            req.cached_tokens += pre_len - req.already_computed
            req.already_computed = seq_len
            req.is_retracted = False

            # Compute the relative logprob_start_len in an extend batch
            if req.logprob_start_len >= pre_len:
                req.extend_logprob_start_len = min(
                    req.logprob_start_len - pre_len,
                    req.extend_input_len,
                    req.seqlen - 1,
                )
            else:
                req.extend_logprob_start_len = 0

            if self.return_logprob:
                # Find input logprob token ids.
                # First, find a global index within origin_input_ids and slide it by 1
                # to compute input logprobs. This is because you need the next token
                # to compute input logprobs. E.g., (chunk size 2)
                #
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [1, 2]
                # extend_input_logprob_token_id = [2, 3]
                #
                # Note that it can also overflow. In this case, we pad it with 0.
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [3, 4]
                # extend_input_logprob_token_id = [4, 0]
                global_start_idx, global_end_idx = (
                    len(req.prefix_indices),
                    len(req.fill_ids),
                )
                # Apply logprob_start_len
                if global_start_idx < req.logprob_start_len:
                    global_start_idx = req.logprob_start_len

                logprob_token_ids = req.origin_input_ids[
                    global_start_idx + 1 : global_end_idx + 1
                ]
                extend_input_logprob_token_ids.extend(logprob_token_ids)

                # We need req.extend_input_len - req.extend_logprob_start_len tokens in total;
                # logprob_token_ids only covers the input logprobs, so pad the rest with 0.
                extend_input_logprob_token_ids.extend(
                    [0]
                    * (
                        req.extend_input_len
                        - req.extend_logprob_start_len
                        - len(logprob_token_ids)
                    )
                )

        if self.return_logprob:
            extend_input_logprob_token_ids = torch.tensor(
                extend_input_logprob_token_ids
            )
        else:
            extend_input_logprob_token_ids = None

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            out_cache_loc = self.alloc_token_slots(extend_num_tokens)
        else:
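            # Page size > 1: pass the last KV slot of each request's cached prefix
            # (see get_last_loc below) so the paged allocator can keep filling the
            # partially used last page where possible before claiming new pages.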
            last_loc = get_last_loc(
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
            )
            out_cache_loc = self.alloc_paged_token_slots_extend(
                prefix_lens_tensor, seq_lens_tensor, last_loc, extend_num_tokens
            )

        # Set fields
        self.input_ids = input_ids_tensor
        self.req_pool_indices = req_pool_indices_tensor
        self.seq_lens = seq_lens_tensor
        self.orig_seq_lens = orig_seq_lens_tensor
        self.out_cache_loc = out_cache_loc
        self.input_embeds = (
            torch.tensor(input_embeds).to(self.device, non_blocking=True)
            if input_embeds
            else None
        )
        for mm_input in multimodal_inputs:
            if mm_input is None:
                continue
            for mm_item in mm_input.mm_items:
                pixel_values = getattr(mm_item, "feature", None)
                if isinstance(pixel_values, torch.Tensor):
                    mm_item.feature = pixel_values.to(self.device, non_blocking=True)
        self.multimodal_inputs = multimodal_inputs
        self.token_type_ids = token_type_ids_tensor
        self.seq_lens_sum = sum(seq_lens)

        if self.return_logprob:
            self.top_logprobs_nums = [r.top_logprobs_num for r in reqs]
            self.token_ids_logprobs = [r.token_ids_logprob for r in reqs]

        self.extend_logprob_start_lens = [r.extend_logprob_start_len for r in reqs]
        self.extend_num_tokens = extend_num_tokens
        self.prefix_lens = prefix_lens
        self.extend_lens = extend_lens
        self.extend_input_logprob_token_ids = extend_input_logprob_token_ids

        # Write to req_to_token_pool
        if support_triton(global_server_args_dict.get("attention_backend")):
            # TODO: some tensors can be reused for ForwardBatchInfo (e.g., extend_lens, cumsum_start)

            write_req_to_token_pool_triton[(bs,)](
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
                seq_lens_tensor,
                extend_lens_tensor,
                out_cache_loc,
                self.req_to_token_pool.req_to_token.shape[1],
            )
        else:
            pt = 0
            for i in range(bs):
                self.req_to_token_pool.write(
                    (req_pool_indices[i], slice(prefix_lens[i], seq_lens[i])),
                    out_cache_loc[pt : pt + extend_lens[i]],
                )
                pt += extend_lens[i]

        if self.model_config.is_encoder_decoder:
            self.prepare_encoder_info_extend(input_ids, seq_lens)

        # Build sampling info
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_split_prefill(self):
        self.prepare_for_extend()
        # For split prefill, we need to set the forward mode to SPLIT_PREFILL
        self.forward_mode = ForwardMode.SPLIT_PREFILL

    def mix_with_running(self, running_batch: "ScheduleBatch"):
        self.forward_mode = ForwardMode.MIXED
        running_bs = running_batch.batch_size()

        for req in running_batch.reqs:
            req.fill_ids = req.origin_input_ids + req.output_ids
            req.extend_input_len = 1

        input_ids = torch.cat([self.input_ids, running_batch.input_ids])
        out_cache_loc = torch.cat([self.out_cache_loc, running_batch.out_cache_loc])

        self.merge_batch(running_batch)
        self.input_ids = input_ids
        self.out_cache_loc = out_cache_loc

        # For overlap scheduler, the output_ids has one step delay
        delta = 0 if self.enable_overlap else -1

        # NOTE: prefix_indices is what has been cached, but we don't cache each decode step
        self.prefix_lens.extend(
            [
                len(r.origin_input_ids) + len(r.output_ids) + delta
                for r in running_batch.reqs
            ]
        )
        self.extend_lens.extend([1] * running_bs)
        self.extend_num_tokens += running_bs
        # TODO (lianmin): Revisit this. It should be seq_len - 1
        self.extend_logprob_start_lens.extend([0] * running_bs)

    def new_page_count_next_decode(self):
        page_size = self.token_to_kv_pool_allocator.page_size
        if page_size == 1:
            return len(self.reqs)
        # In the decoding phase, the length of a request's KV cache should be
        # the total length of the request minus 1
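        # A new page is needed only when that length is a multiple of page_size,
        # i.e. the last page is already full. Example: with page_size = 4, a request
        # with seqlen - 1 = 8 cached tokens needs a new page for the next token,
        # while one with 7 cached tokens still has a free slot in its last page.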
        return (
            sum(1 for req in self.reqs if req.seqlen % page_size == 0)
            if self.enable_overlap
            else sum(1 for req in self.reqs if (req.seqlen - 1) % page_size == 0)
        )

    def check_decode_mem(self, buf_multiplier=1):
        num_tokens = (
            self.new_page_count_next_decode()
            * buf_multiplier
            * self.token_to_kv_pool_allocator.page_size
        )
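        # Evict from the tree cache if needed so that `num_tokens` KV slots can be
        # allocated for the next decode step, then report whether the pool has room.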

        self._evict_tree_cache_if_needed(num_tokens)
        return self._is_available_size_sufficient(num_tokens)

    def retract_decode(self, server_args: ServerArgs):
        """Retract the decoding requests when there is not enough memory."""
        sorted_indices = list(range(len(self.reqs)))
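        # Requests are popped from the tail of `sorted_indices` and their KV cache is
        # released until the pool has enough headroom (see get_required_tokens below)
        # for the remaining requests to keep decoding.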

        # TODO(lsyin): improve retraction policy for radix cache
        # For spec decoding, the filter_batch API can only filter
        # requests from the back, so we can only retract from the back.
        # TODO(sang): Clean up the finish path and support a better
        # retraction policy.
        if not server_args.speculative_algorithm:
            sorted_indices.sort(
                key=lambda i: (
                    len(self.reqs[i].output_ids),
                    -len(self.reqs[i].origin_input_ids),
                ),
                reverse=True,
            )

        def get_required_tokens(num_reqs: int):
            headroom_for_spec_decode = 0
            if server_args.speculative_algorithm:
                headroom_for_spec_decode += (
                    num_reqs
                    * server_args.speculative_eagle_topk
                    * server_args.speculative_num_steps
                    + num_reqs * server_args.speculative_num_draft_tokens
                )
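            # Example: with retract_decode_steps = 20 and EAGLE settings topk = 4,
            # num_steps = 3, num_draft_tokens = 8, each remaining request reserves
            # 20 + 4 * 3 + 8 = 40 token slots of headroom.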
            return (
                num_reqs * global_config.retract_decode_steps + headroom_for_spec_decode
            )

        def _get_available_size():
            if self.is_hybrid:
                return min(
                    self.token_to_kv_pool_allocator.full_available_size(),
                    self.token_to_kv_pool_allocator.swa_available_size(),
                )
            else:
                return self.token_to_kv_pool_allocator.available_size()

        retracted_reqs = []
        seq_lens_cpu = self.seq_lens.cpu().numpy()
        first_iter = True
        while (
            _get_available_size() < get_required_tokens(len(sorted_indices))
            or first_iter
        ):
            if len(sorted_indices) == 1:
                # Corner case: only one request left
                if self.is_hybrid:
                    full_available_size = (
                        self.token_to_kv_pool_allocator.full_available_size()
                    )
                    swa_available_size = (
                        self.token_to_kv_pool_allocator.swa_available_size()
                    )
                    assert (
                        full_available_size > 0 and swa_available_size > 0
                    ), f"No space left for only one request in SWA mode {full_available_size=}, {swa_available_size=}"
                else:
                    assert (
                        self.token_to_kv_pool_allocator.available_size() > 0
                    ), f"No space left for only one request, {self.token_to_kv_pool_allocator.available_size()=}"
                break

            first_iter = False
            idx = sorted_indices.pop()
            req = self.reqs[idx]
            retracted_reqs.append(req)

            if server_args.disaggregation_mode == "decode":
                req.offload_kv_cache(
                    self.req_to_token_pool, self.token_to_kv_pool_allocator
                )

            if isinstance(self.tree_cache, ChunkCache):
                # ChunkCache does not have eviction
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)
            else:
                # TODO: apply more fine-grained retraction
                last_uncached_pos = (
                    len(req.prefix_indices) // server_args.page_size
                ) * server_args.page_size
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, last_uncached_pos : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)

                # release the last node
                if self.is_hybrid:
                    self.tree_cache.dec_lock_ref(req.last_node, req.swa_uuid_for_lock)
                else:
                    self.tree_cache.dec_lock_ref(req.last_node)

                # NOTE(lsyin): we should use the newly evictable memory instantly.
                num_tokens = len(sorted_indices) * global_config.retract_decode_steps
                self._evict_tree_cache_if_needed(num_tokens)

            req.reset_for_retract()

            if len(retracted_reqs) == 0:
                # Corner case: only one request left
                raise ValueError(
                    "Failed to retract any request. No space left for only one request."
                )

        self.filter_batch(keep_indices=sorted_indices)

        # Reqs in batch are filtered
        total_decoded_tokens = sum(len(r.output_ids) for r in self.reqs)
        total_max_new_tokens = sum(r.sampling_params.max_new_tokens for r in self.reqs)

        new_estimate_ratio = (
            total_decoded_tokens + global_config.retract_decode_steps * len(self.reqs)
        ) / total_max_new_tokens
        new_estimate_ratio = min(1.0, new_estimate_ratio)

        return retracted_reqs, new_estimate_ratio

    def prepare_encoder_info_decode(self):
        # Reset the encoder cached status
        self.encoder_cached = [True] * len(self.reqs)

    def prepare_for_idle(self):
        self.forward_mode = ForwardMode.IDLE
        self.input_ids = torch.empty(0, dtype=torch.int64, device=self.device)
        self.seq_lens = torch.empty(0, dtype=torch.int64, device=self.device)
        self.orig_seq_lens = torch.empty(0, dtype=torch.int32, device=self.device)
        self.out_cache_loc = torch.empty(0, dtype=torch.int64, device=self.device)
        self.req_pool_indices = torch.empty(0, dtype=torch.int32, device=self.device)
        self.seq_lens_sum = 0
        self.extend_num_tokens = 0
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_decode(self):
        self.forward_mode = ForwardMode.DECODE
        bs = len(self.reqs)

        if self.spec_algorithm.is_eagle():
            # if spec decoding is used, the decode batch is prepared inside
            # `forward_batch_speculative_generation` after running draft models.
            return

        if self.sampling_info.penalizer_orchestrator.is_required:
            if self.enable_overlap:
                # TODO: this can be slow, optimize this.
                delayed_output_ids = torch.tensor(
                    [
                        (
                            req.output_ids[-1]
                            if len(req.output_ids)
                            else req.origin_input_ids[-1]
                        )
                        for req in self.reqs
                    ],
                    dtype=torch.int64,
                    device=self.device,
                )
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    delayed_output_ids
                )
            else:
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    self.output_ids.to(torch.int64)
                )

        # Update fields
        self.input_ids = self.output_ids
        self.output_ids = None

        if self.model_config.is_encoder_decoder:
            locs = self.encoder_lens + self.seq_lens
            self.prepare_encoder_info_decode()
        else:
            locs = self.seq_lens.clone()

        if self.enable_overlap:
            # Do not use in-place operations in the overlap mode
            self.seq_lens = self.seq_lens + 1
            self.orig_seq_lens = self.orig_seq_lens + 1
        else:
            # A faster in-place version
            self.seq_lens.add_(1)
            self.orig_seq_lens.add_(1)
        self.seq_lens_sum += bs

        # free memory
        if isinstance(self.tree_cache, SWAChunkCache):
            for req in self.reqs:
                self.tree_cache.evict_swa(
                    req, req.seqlen - 1, self.model_config.attention_chunk_size
                )

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            self.out_cache_loc = self.alloc_token_slots(bs)
        else:
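            # `seq_lens` was already advanced by one above, so the last token that is
            # currently cached for each request sits at position seq_lens - 2.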
            last_loc = self.req_to_token_pool.req_to_token[
                self.req_pool_indices, self.seq_lens - 2
            ]
            self.out_cache_loc = self.alloc_paged_token_slots_decode(
                self.seq_lens, last_loc
            )

        self.req_to_token_pool.write(
            (self.req_pool_indices, locs), self.out_cache_loc.to(torch.int32)
        )

    def filter_batch(
        self,
        chunked_req_to_exclude: Optional[Union[Req, List[Req]]] = None,
        keep_indices: Optional[List[int]] = None,
    ):
        if keep_indices is None:
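            # Default policy: keep every unfinished request and drop the chunked
            # request(s) passed in `chunked_req_to_exclude`.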
            if isinstance(chunked_req_to_exclude, Req):
                chunked_req_to_exclude = [chunked_req_to_exclude]
            elif chunked_req_to_exclude is None:
                chunked_req_to_exclude = []
            keep_indices = [
                i
                for i in range(len(self.reqs))
                if not self.reqs[i].finished()
                and self.reqs[i] not in chunked_req_to_exclude
            ]

        if keep_indices is None or len(keep_indices) == 0:
            # Filter out all requests
            self.reqs = []
            return

        if len(keep_indices) == len(self.reqs):
            # No need to filter
            return

        keep_indices_device = torch.tensor(keep_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if self.model_config.is_encoder_decoder:
            self.encoder_lens = self.encoder_lens[keep_indices_device]
            self.encoder_lens_cpu = [self.encoder_lens_cpu[i] for i in keep_indices]

        self.reqs = [self.reqs[i] for i in keep_indices]
        if self.multimodal_inputs is not None:
            self.multimodal_inputs = [self.multimodal_inputs[i] for i in keep_indices]
        self.req_pool_indices = self.req_pool_indices[keep_indices_device]
        self.seq_lens = self.seq_lens[keep_indices_device]
        self.orig_seq_lens = self.orig_seq_lens[keep_indices_device]
        self.out_cache_loc = None
        self.seq_lens_sum = self.seq_lens.sum().item()
        self.output_ids = self.output_ids[keep_indices_device]
        self.return_logprob = any(req.return_logprob for req in self.reqs)
        if self.return_logprob:
            self.top_logprobs_nums = [self.top_logprobs_nums[i] for i in keep_indices]
            self.token_ids_logprobs = [self.token_ids_logprobs[i] for i in keep_indices]
        else:
            self.top_logprobs_nums = None
            self.token_ids_logprobs = None

        self.has_stream = any(req.stream for req in self.reqs)
        self.has_grammar = any(req.grammar for req in self.reqs)

        self.sampling_info.filter_batch(keep_indices, keep_indices_device)
        if self.spec_info:
            self.spec_info.filter_batch(keep_indices_device)

    def merge_batch(self, other: "ScheduleBatch"):
        # Penalizer orchestrator must be merged before Batch.reqs is merged. This is because
        # orchestrator.merge() depends on Batch.reqs during the preparation of each penalizer,
        # so it needs to be called with pre-merged Batch.reqs.
        self.sampling_info.merge_batch(other.sampling_info)

        # Encoder-decoder infos
        if self.model_config.is_encoder_decoder:
            self.encoder_lens = torch.cat([self.encoder_lens, other.encoder_lens])
            self.encoder_lens_cpu.extend(other.encoder_lens_cpu)
        self.req_pool_indices = torch.cat(
            [self.req_pool_indices, other.req_pool_indices]
        )
        self.seq_lens = torch.cat([self.seq_lens, other.seq_lens])
        self.orig_seq_lens = torch.cat([self.orig_seq_lens, other.orig_seq_lens])
        self.out_cache_loc = None
        self.seq_lens_sum += other.seq_lens_sum
        if self.output_ids is not None:
            self.output_ids = torch.cat([self.output_ids, other.output_ids])
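        # Keep per-request logprob metadata aligned with the merged request list:
        # requests from a batch that does not return logprobs get neutral entries
        # (top_logprobs_num = 0, token_ids_logprob = None).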
        if self.return_logprob and other.return_logprob:
            self.top_logprobs_nums.extend(other.top_logprobs_nums)
            self.token_ids_logprobs.extend(other.token_ids_logprobs)
        elif self.return_logprob:
            self.top_logprobs_nums.extend([0] * len(other.reqs))
            self.token_ids_logprobs.extend([None] * len(other.reqs))
        elif other.return_logprob:
            self.top_logprobs_nums = [0] * len(self.reqs) + other.top_logprobs_nums
            self.token_ids_logprobs = [None] * len(self.reqs) + other.token_ids_logprobs
        self.reqs.extend(other.reqs)
        if self.multimodal_inputs is not None:
            self.multimodal_inputs.extend(other.multimodal_inputs)

        self.return_logprob |= other.return_logprob
        self.has_stream |= other.has_stream
        self.has_grammar |= other.has_grammar
        self.return_hidden_states |= other.return_hidden_states

        if self.spec_info:
            self.spec_info.merge_batch(other.spec_info)

    def get_model_worker_batch(
        self, seq_lens_cpu_cache: Optional[torch.Tensor] = None
    ) -> ModelWorkerBatch:
        if self.forward_mode.is_decode_or_idle():
            extend_seq_lens = extend_prefix_lens = extend_logprob_start_lens = None
        else:
            extend_seq_lens = self.extend_lens
            extend_prefix_lens = self.prefix_lens
            extend_logprob_start_lens = self.extend_logprob_start_lens

        if self.sampling_info:
            if self.has_grammar:
                self.sampling_info.grammars = [req.grammar for req in self.reqs]
            else:
                self.sampling_info.grammars = None

        seq_lens_cpu = (
            seq_lens_cpu_cache
            if seq_lens_cpu_cache is not None
            else self.seq_lens.cpu()
        )

        global bid
        bid += 1
        return ModelWorkerBatch(
            bid=bid,
            forward_mode=self.forward_mode,
            input_ids=self.input_ids,
            req_pool_indices=self.req_pool_indices,
            seq_lens=self.seq_lens,
            orig_seq_lens=self.orig_seq_lens,
            out_cache_loc=self.out_cache_loc,
            seq_lens_cpu=seq_lens_cpu,
            seq_lens_sum=self.seq_lens_sum,
            return_logprob=self.return_logprob,
            top_logprobs_nums=self.top_logprobs_nums,
            token_ids_logprobs=self.token_ids_logprobs,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            is_extend_in_batch=self.is_extend_in_batch,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            tbo_split_seq_index=self.tbo_split_seq_index,
            global_forward_mode=self.global_forward_mode,
            extend_num_tokens=self.extend_num_tokens,
            extend_seq_lens=extend_seq_lens,
            extend_prefix_lens=extend_prefix_lens,
            extend_logprob_start_lens=extend_logprob_start_lens,
            multimodal_inputs=self.multimodal_inputs,
            encoder_cached=self.encoder_cached,
            encoder_lens=self.encoder_lens,
            encoder_lens_cpu=self.encoder_lens_cpu,
            encoder_out_cache_loc=self.encoder_out_cache_loc,
            lora_ids=[req.lora_id for req in self.reqs],
            sampling_info=self.sampling_info,
            input_embeds=self.input_embeds,
            token_type_ids=self.token_type_ids,
            spec_algorithm=self.spec_algorithm,
            spec_info=self.spec_info,
            hicache_consumer_index=self.hicache_consumer_index,
            capture_hidden_mode=(
                CaptureHiddenMode.FULL
                if self.return_hidden_states
                else (
                    getattr(
                        self.spec_info, "capture_hidden_mode", CaptureHiddenMode.NULL
                    )
                    if self.spec_info
                    else CaptureHiddenMode.NULL
                )
            ),
            extend_input_logprob_token_ids=self.extend_input_logprob_token_ids,
            launch_done=self.launch_done,
        )

    def copy(self):
        # Only contains fields that will be used by process_batch_result
        return ScheduleBatch(
            reqs=self.reqs,
            model_config=self.model_config,
            forward_mode=self.forward_mode,
            out_cache_loc=self.out_cache_loc,
            return_logprob=self.return_logprob,
            decoding_reqs=self.decoding_reqs,
            spec_algorithm=self.spec_algorithm,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            is_extend_in_batch=self.is_extend_in_batch,
            is_prefill_only=self.is_prefill_only,
        )

    def _evict_tree_cache_if_needed(self, num_tokens: int):
        if isinstance(self.tree_cache, (SWAChunkCache, ChunkCache)):
            return

        if self.is_hybrid:
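            # The full-attention pool and the sliding-window (SWA) pool are tracked
            # separately, so evict from each cache only by its own shortfall.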
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()

            if full_available_size < num_tokens or swa_available_size < num_tokens:
                if self.tree_cache is not None:
                    full_num_tokens = max(0, num_tokens - full_available_size)
                    swa_num_tokens = max(0, num_tokens - swa_available_size)
                    self.tree_cache.evict(full_num_tokens, swa_num_tokens)
        else:
            if self.token_to_kv_pool_allocator.available_size() < num_tokens:
                if self.tree_cache is not None:
                    self.tree_cache.evict(num_tokens)

    def _is_available_size_sufficient(self, num_tokens: int) -> bool:
        if self.is_hybrid:
            return (
                self.token_to_kv_pool_allocator.full_available_size() >= num_tokens
                and self.token_to_kv_pool_allocator.swa_available_size() >= num_tokens
            )
        else:
            return self.token_to_kv_pool_allocator.available_size() >= num_tokens

    def _available_and_evictable_str(self) -> str:
        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()
            full_evictable_size = self.tree_cache.full_evictable_size()
            swa_evictable_size = self.tree_cache.swa_evictable_size()
            return (
                f"Available full tokens: {full_available_size + full_evictable_size} ({full_available_size=} + {full_evictable_size=})\n"
                f"Available swa tokens: {swa_available_size + swa_evictable_size} ({swa_available_size=} + {swa_evictable_size=})\n"
                f"Full LRU list evictable size: {self.tree_cache.full_lru_list_evictable_size()}\n"
                f"SWA LRU list evictable size: {self.tree_cache.swa_lru_list_evictable_size()}\n"
            )
        else:
            available_size = self.token_to_kv_pool_allocator.available_size()
            evictable_size = self.tree_cache.evictable_size()
            return f"Available tokens: {available_size + evictable_size} ({available_size=} + {evictable_size=})\n"

    def __str__(self):
        return (
            f"ScheduleBatch(forward_mode={self.forward_mode.name if self.forward_mode else 'None'}, "
            f"#req={(len(self.reqs))})"
        )


@dataclasses.dataclass
class ModelWorkerBatch:
    # The batch id
    bid: int
    # The forward mode
    forward_mode: ForwardMode
    # The input ids
    input_ids: torch.Tensor
    # The indices of requests in the req_to_token_pool
    req_pool_indices: torch.Tensor
    # The sequence lengths
    seq_lens: torch.Tensor
    # The indices of output tokens in the token_to_kv_pool_allocator
    out_cache_loc: torch.Tensor
    # The sequence length tensor on CPU
    seq_lens_cpu: Optional[torch.Tensor]
    seq_lens_sum: int

    # For logprob
    return_logprob: bool
    top_logprobs_nums: Optional[List[int]]
    token_ids_logprobs: Optional[List[List[int]]]

    # For DP attention
    global_num_tokens: Optional[List[int]]
    global_num_tokens_for_logprob: Optional[List[int]]
    is_extend_in_batch: bool
    can_run_dp_cuda_graph: bool
    tbo_split_seq_index: Optional[int]
    global_forward_mode: Optional[ForwardMode]

    # For extend
    extend_num_tokens: Optional[int]
    extend_seq_lens: Optional[List[int]]
    extend_prefix_lens: Optional[List[int]]
    extend_logprob_start_lens: Optional[List[int]]
    extend_input_logprob_token_ids: Optional[torch.Tensor]

    # For multimodal
    multimodal_inputs: Optional[List[MultimodalInputs]]

    # For encoder-decoder
    encoder_cached: Optional[List[bool]]
    encoder_lens: Optional[torch.Tensor]
    encoder_lens_cpu: Optional[List[int]]
    encoder_out_cache_loc: Optional[torch.Tensor]

    # For LoRA
    lora_ids: Optional[List[str]]

    # Sampling info
    sampling_info: SamplingBatchInfo

    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: Optional[torch.Tensor] = None

    # The input embeddings
    input_embeds: Optional[torch.Tensor] = None

    # For cross-encoder models
    token_type_ids: Optional[torch.Tensor] = None

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleVerifyInput, EagleDraftInput]] = None
    # If set, the output of the batch contains the hidden states of the run.
    capture_hidden_mode: CaptureHiddenMode = None
    hicache_consumer_index: int = 0

    # Overlap event
    launch_done: Optional[threading.Event] = None


@triton.jit
def write_req_to_token_pool_triton(
    req_to_token_ptr,  # [max_batch, max_context_len]
    req_pool_indices,
    pre_lens,
    seq_lens,
    extend_lens,
    out_cache_loc,
    req_to_token_ptr_stride: tl.constexpr,
):
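    # One program instance per request: copy the request's newly allocated KV slot
    # indices, out_cache_loc[cumsum_start : cumsum_start + (seq_len - pre_len)],
    # into req_to_token[req_pool_index, pre_len:seq_len] in chunks of BLOCK_SIZE.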
    BLOCK_SIZE: tl.constexpr = 512
    pid = tl.program_id(0)

    req_pool_index = tl.load(req_pool_indices + pid)
    pre_len = tl.load(pre_lens + pid)
    seq_len = tl.load(seq_lens + pid)

    # NOTE: This can be slow for large bs
    cumsum_start = tl.cast(0, tl.int64)
    for i in range(pid):
        cumsum_start += tl.load(extend_lens + i)

    num_loop = tl.cdiv(seq_len - pre_len, BLOCK_SIZE)
    for i in range(num_loop):
        offset = tl.arange(0, BLOCK_SIZE) + i * BLOCK_SIZE
        mask = offset < (seq_len - pre_len)
        value = tl.load(out_cache_loc + cumsum_start + offset, mask=mask)
        tl.store(
            req_to_token_ptr
            + req_pool_index * req_to_token_ptr_stride
            + offset
            + pre_len,
            value,
            mask=mask,
        )


def get_last_loc(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
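    # For each request, return the KV-pool index of the last cached prefix token,
    # or -1 when the request has no cached prefix. The paged allocators use this to
    # know where each request's prefix ends within its last page.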
    if (
        global_server_args_dict["attention_backend"] != "ascend"
        and global_server_args_dict["attention_backend"] != "torch_native"
    ):
        impl = get_last_loc_triton
    else:
        impl = get_last_loc_torch

    return impl(req_to_token, req_pool_indices_tensor, prefix_lens_tensor)


def get_last_loc_torch(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    return torch.where(
        prefix_lens_tensor > 0,
        req_to_token[req_pool_indices_tensor, prefix_lens_tensor - 1],
        torch.full_like(prefix_lens_tensor, -1),
    )


@triton.jit
def get_last_loc_kernel(
    req_to_token,
    req_pool_indices_tensor,
    prefix_lens_tensor,
    result,
    num_tokens,
    req_to_token_stride,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(0)
    offset = tl.arange(0, BLOCK_SIZE) + pid * BLOCK_SIZE
    mask = offset < num_tokens

    prefix_lens = tl.load(prefix_lens_tensor + offset, mask=mask, other=0)
    req_pool_indices = tl.load(req_pool_indices_tensor + offset, mask=mask, other=0)

    token_mask = prefix_lens > 0
    token_index = req_pool_indices * req_to_token_stride + (prefix_lens - 1)
    tokens = tl.load(req_to_token + token_index, mask=token_mask, other=-1)

    tl.store(result + offset, tokens, mask=mask)


def get_last_loc_triton(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    BLOCK_SIZE = 256
    num_tokens = prefix_lens_tensor.shape[0]
    result = torch.empty_like(prefix_lens_tensor)
    grid = (triton.cdiv(num_tokens, BLOCK_SIZE),)

    get_last_loc_kernel[grid](
        req_to_token,
        req_pool_indices_tensor,
        prefix_lens_tensor,
        result,
        num_tokens,
        req_to_token.stride(0),
        BLOCK_SIZE,
    )
    return result