from __future__ import annotations

# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Store information about requests and batches.

The following is the flow of data structures for a batch:

ScheduleBatch -> ModelWorkerBatch -> ForwardBatch

- ScheduleBatch is managed by `scheduler.py::Scheduler`.
  It contains high-level scheduling data. Most of the data is on the CPU.
- ModelWorkerBatch is managed by `tp_worker.py::TpModelWorker`.
26
27
  It is a subset of `ScheduleBatch` that only contains data related to the model forward pass on the GPU.
  It is transferred from the CPU scheduler to the GPU model runner.
28
29
- ForwardBatch is managed by `model_runner.py::ModelRunner`.
  It contains low-level tensor data. Most of the data consists of GPU tensors.

TODO(lmzheng): ModelWorkerBatch seems a bit redundant, and we are considering removing it in the future.
32
"""

34
import copy
35
import dataclasses
Ying Sheng's avatar
Ying Sheng committed
36
import logging
37
import threading
Lianmin Zheng's avatar
Lianmin Zheng committed
38
from enum import Enum, auto
39
from http import HTTPStatus
40
from itertools import chain
Yi Zhang's avatar
Yi Zhang committed
41
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union
Lianmin Zheng's avatar
Lianmin Zheng committed
42

43
import numpy as np
Lianmin Zheng's avatar
Lianmin Zheng committed
44
import torch
45
46
import triton
import triton.language as tl
47

Liangsheng Yin's avatar
Liangsheng Yin committed
48
from sglang.global_config import global_config
49
from sglang.srt.constrained.base_grammar_backend import BaseGrammarObject
50
from sglang.srt.disaggregation.base import BaseKVSender
from sglang.srt.disaggregation.decode_schedule_batch_mixin import (
    ScheduleBatchDisaggregationDecodeMixin,
)
54
from sglang.srt.distributed.parallel_state import get_tensor_model_parallel_rank
from sglang.srt.mem_cache.allocator import (
    BaseTokenToKVPoolAllocator,
    SWATokenToKVPoolAllocator,
)
59
from sglang.srt.mem_cache.base_prefix_cache import BasePrefixCache
tarinkk's avatar
tarinkk committed
60
from sglang.srt.mem_cache.chunk_cache import ChunkCache, SWAChunkCache
61
from sglang.srt.mem_cache.lora_radix_cache import LoRAKey, LoRARadixCache
Yi Zhang's avatar
Yi Zhang committed
62
from sglang.srt.mem_cache.memory_pool import HybridReqToTokenPool, ReqToTokenPool
Hanming Lu's avatar
Hanming Lu committed
63
from sglang.srt.mem_cache.swa_radix_cache import SWARadixCache
64
from sglang.srt.metrics.collector import TimeStats
Lianmin Zheng's avatar
Lianmin Zheng committed
65
from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode, ForwardMode
66
from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
67
from sglang.srt.sampling.sampling_params import SamplingParams
68
from sglang.srt.server_args import ServerArgs
69
from sglang.srt.utils import flatten_nested_list, support_triton
Liangsheng Yin's avatar
Liangsheng Yin committed
70

71
if TYPE_CHECKING:
Cheng Wan's avatar
Cheng Wan committed
72
    from sglang.srt.configs.model_config import ModelConfig
73
74
75
    from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
    from sglang.srt.speculative.spec_info import SpeculativeAlgorithm

Liangsheng Yin's avatar
Liangsheng Yin committed
76
INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5
Lianmin Zheng's avatar
Lianmin Zheng committed
77

78
79
GLOBAL_SERVER_ARGS_KEYS = [
    "attention_backend",
80
    "mm_attention_backend",
    "debug_tensor_dump_inject",
    "debug_tensor_dump_output_folder",
    "chunked_prefill_size",
    "device",
    "disable_chunked_prefix_cache",
86
    "disable_flashinfer_cutlass_moe_fp4_allgather",
87
88
    "disable_radix_cache",
    "enable_dp_lm_head",
89
    "flashinfer_mxfp4_moe_precision",
90
    "enable_flashinfer_allreduce_fusion",
91
92
93
    "moe_dense_tp_size",
    "ep_dispatch_algorithm",
    "ep_num_redundant_experts",
    "enable_nan_detection",
    "flashinfer_mla_disable_ragged",
    "max_micro_batch_size",
    "disable_shared_experts_fusion",
    "sampling_backend",
    "speculative_accept_threshold_single",
100
    "speculative_accept_threshold_acc",
101
    "speculative_attention_mode",
102
103
    "torchao_config",
    "triton_attention_reduce_in_fp32",
104
    "num_reserved_decode_tokens",
105
    "weight_loader_disable_mmap",
106
    "enable_multimodal",
107
    "enable_symm_mem",
108
    "quantization",
Lianmin Zheng's avatar
Lianmin Zheng committed
109
    "enable_custom_logit_processor",
110
    "disaggregation_mode",
111
112
]

113
# Put some global args for easy access
114
global_server_args_dict = {k: getattr(ServerArgs, k) for k in GLOBAL_SERVER_ARGS_KEYS}
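# Example read (hedged): the entries start out as the ServerArgs class defaults
# taken by the comprehension above, and are expected to be overwritten with the
# live server configuration once the scheduler initializes.
#   attention_backend = global_server_args_dict["attention_backend"]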

logger = logging.getLogger(__name__)


119
120
121
class BaseFinishReason:
    def __init__(self, is_error: bool = False):
        self.is_error = is_error
Lianmin Zheng's avatar
Lianmin Zheng committed
122

123
    def to_json(self):
124
        raise NotImplementedError()
125
126
127


class FINISH_MATCHED_TOKEN(BaseFinishReason):
Mingyi's avatar
Mingyi committed
128
    def __init__(self, matched: Union[int, List[int]]):
129
130
131
        super().__init__()
        self.matched = matched

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }
137
138


139
140
class FINISH_MATCHED_STR(BaseFinishReason):
    def __init__(self, matched: str):
141
        super().__init__()
142
        self.matched = matched
143

    def to_json(self):
        return {
            "type": "stop",  # to match OpenAI API's return value
            "matched": self.matched,
        }
149
150


151
152
class FINISH_LENGTH(BaseFinishReason):
    def __init__(self, length: int):
153
        super().__init__()
154
        self.length = length
155

    def to_json(self):
        return {
            "type": "length",  # to match OpenAI API's return value
            "length": self.length,
        }
161
162
163


class FINISH_ABORT(BaseFinishReason):
Lianmin Zheng's avatar
Lianmin Zheng committed
164
    def __init__(self, message=None, status_code=None, err_type=None):
165
        super().__init__(is_error=True)
Lianmin Zheng's avatar
Lianmin Zheng committed
166
        self.message = message or "Aborted"
167
168
        self.status_code = status_code
        self.err_type = err_type
169

170
171
172
    def to_json(self):
        return {
            "type": "abort",
Lianmin Zheng's avatar
Lianmin Zheng committed
173
            "message": self.message,
174
175
            "status_code": self.status_code,
            "err_type": self.err_type,
176
        }
177

Lianmin Zheng's avatar
Lianmin Zheng committed
178

class Modality(Enum):
    IMAGE = auto()
    MULTI_IMAGES = auto()
    VIDEO = auto()
    AUDIO = auto()

    @staticmethod
    def from_str(modality_str: str):
        try:
            return Modality[modality_str.upper()]
        except KeyError:
            raise ValueError(
                f"Invalid modality string: {modality_str}. Valid modalities are: {[m.name for m in Modality]}"
            )

    @staticmethod
    def all():
        return [Modality.IMAGE, Modality.VIDEO, Modality.AUDIO]

Mick's avatar
Mick committed
198

199
@dataclasses.dataclass
class MultimodalDataItem:
    """
202
203
204
    One MultimodalDataItem contains all inputs for one modality.
    For example, if there are 3 image inputs and 1 audio input, there will be
    2 MultimodalDataItems: one for the images and one for the audio.
205

206
    We put the common fields first and the model-specific fields in model_specific_data.
Mick's avatar
Mick committed
207
    """

    modality: Modality
    hash: int = None
    pad_value: int = None
212
    offsets: Optional[list] = None
Mick's avatar
Mick committed
213

214
215
    # the raw features returned by processor, e.g. pixel_values or audio_features
    feature: Union[torch.Tensor, np.ndarray] = None
    # The precomputed embeddings, passed directly as the final encoder embeddings.
    # Exactly one of `feature` and `precomputed_embeddings` will be set; the other is empty.
    precomputed_embeddings: Optional[Union[torch.Tensor, np.ndarray]] = None

    # Model-specific data stored in a dictionary
    model_specific_data: dict[str, Any] = dataclasses.field(default_factory=dict)

    def __getattr__(self, name: str):
        if (
            "model_specific_data" in self.__dict__
            and name in self.__dict__["model_specific_data"]
        ):
            return self.__dict__["model_specific_data"][name]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )
Mick's avatar
Mick committed
233

    def __setitem__(self, key: str, value: Any):
        if key in self.__dict__:
            self.__dict__[key] = value
        else:
            self.model_specific_data[key] = value
239

240
241
    def set(self, key: str, value: Any):
        self.__setitem__(key, value)
242

    @staticmethod
    def is_empty_list(l):
        if l is None:
            return True
        return len([item for item in flatten_nested_list(l) if item is not None]) == 0

    def set_pad_value(self):
        """
Mick's avatar
Mick committed
251
        Set the pad value after first hashing the data
Mick's avatar
Mick committed
252
        """
253
        from sglang.srt.managers.mm_utils import hash_feature
Mick's avatar
Mick committed
254

255
        if self.hash is None:
256
257
            if self.feature is not None:
                hashed_feature = self.feature
258
            else:
259
                hashed_feature = self.precomputed_embeddings
260
            self.hash = hash_feature(hashed_feature)
        assert self.hash is not None
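        # The pad value is derived from the content hash, so identical multimodal
        # contents map to the same placeholder id in [0, 2**30).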
        self.pad_value = self.hash % (1 << 30)

264
265
266
    def is_modality(self, modality: Modality) -> bool:
        return self.modality == modality

Mick's avatar
Mick committed
267
    def is_audio(self):
268
        return self.modality == Modality.AUDIO

    def is_image(self):
271
        return self.modality in [Modality.IMAGE, Modality.MULTI_IMAGES]

    def is_video(self):
274
        return self.modality == Modality.VIDEO
Mick's avatar
Mick committed
275

276
277
278
    def is_valid(self) -> bool:
        return self.is_image() or self.is_video() or self.is_audio()

    def validate(self):
        ...
        # TODO

    @staticmethod
    def from_dict(obj: dict):
        kwargs = dict(obj)
        modality = kwargs.pop("modality")
        if isinstance(modality, str):
            modality = Modality[modality]
        ret = MultimodalDataItem(modality=modality, **kwargs)
        ret.validate()
        return ret

293
    def merge(self, other):
294
        self.feature += other.feature
295
        self.offsets += other.offsets
296
297
298
        self.hash = hash((self.hash, other.hash))
        self.set_pad_value()


@dataclasses.dataclass
class MultimodalInputs:
    """The multimodal data related inputs."""

    # items of data
    mm_items: List[MultimodalDataItem]
306
    image_pad_len: Optional[list] = None
307
    num_image_tokens: Optional[int] = None
Liangsheng Yin's avatar
Liangsheng Yin committed
308

Mick's avatar
Mick committed
309
    # image
Mick's avatar
Mick committed
310
    im_token_id: Optional[int] = None
    im_start_id: Optional[int] = None
    im_end_id: Optional[int] = None
    slice_start_id: Optional[int] = None
    slice_end_id: Optional[int] = None

    # video
    video_token_id: Optional[int] = None
Mick's avatar
Mick committed
318

Mick's avatar
Mick committed
319
    # audio
320
321
322
    audio_token_id: Optional[int] = None
    audio_start_id: Optional[int] = None
    audio_end_id: Optional[int] = None
Mick's avatar
Mick committed
323

    # QWen2-VL related
    mrope_positions: Optional[torch.Tensor] = None
    mrope_position_delta: Optional[torch.Tensor] = None

Liangsheng Yin's avatar
Liangsheng Yin committed
328
    @staticmethod
329
    def from_dict(obj: dict):
Mick's avatar
Mick committed
330
        ret = MultimodalInputs(
Mick's avatar
Mick committed
331
            mm_items=obj["mm_items"],
Liangsheng Yin's avatar
Liangsheng Yin committed
332
        )
333

Mick's avatar
Mick committed
334
        assert isinstance(ret.mm_items, list)
335
        ret.mm_items = [item for item in ret.mm_items if item.is_valid()]
        for item in ret.mm_items:
            item.set_pad_value()
338
339

        optional_args = [
340
341
            "mrope_positions",
            "mrope_position_delta",
342
            "im_token_id",
            "im_start_id",
            "im_end_id",
345
            "video_token_id",
            "slice_start_id",
            "slice_end_id",
            "audio_start_id",
            "audio_end_id",
350
            "audio_token_id",
        ]
        for arg in optional_args:
            if arg in obj:
                setattr(ret, arg, obj[arg])

        return ret

Mick's avatar
Mick committed
358
    def contains_image_inputs(self) -> bool:
Mick's avatar
Mick committed
359
        return any(item.is_image() for item in self.mm_items)
Mick's avatar
Mick committed
360

361
362
363
    def contains_video_inputs(self) -> bool:
        return any(item.is_video() for item in self.mm_items)

Mick's avatar
Mick committed
364
    def contains_audio_inputs(self) -> bool:
        return any(item.is_audio() for item in self.mm_items)

367
368
    def contains_mm_input(self) -> bool:
        return any(True for item in self.mm_items if item.is_valid())

    def merge(self, other: MultimodalInputs):
371
372
373
        """
        merge image inputs when requests are being merged
        """
374

375
        # args needed to be merged
376
        optional_args = [
Mick's avatar
Mick committed
377
            "mm_items",
378
            "image_pad_len",
379
380
        ]
        for arg in optional_args:
381
382
383
            self_arg = getattr(self, arg, None)
            if self_arg is not None:
                setattr(self, arg, self_arg + getattr(other, arg))

        mrope_positions = self.mrope_positions
        if mrope_positions is not None:
            if other.mrope_positions is None:
                self.mrope_positions = mrope_positions
            else:
                self.mrope_positions = torch.cat(
                    [self.mrope_positions, other.mrope_positions], dim=1
                )

        mrope_position_delta = self.mrope_position_delta
        if mrope_position_delta is not None:
            if other.mrope_position_delta is None:
                self.mrope_position_delta = mrope_position_delta
            else:
                self.mrope_position_delta = torch.cat(
                    [self.mrope_position_delta, other.mrope_position_delta], dim=0
                )

        for key, val in other.__dict__.items():
            if "_id" in key:
                # set token_ids
                if getattr(self, key, None) is None:
                    setattr(self, key, getattr(other, key, None))
408
        # other args would be kept intact
409

Liangsheng Yin's avatar
Liangsheng Yin committed
410

Lianmin Zheng's avatar
Lianmin Zheng committed
411
class Req:
412
    """The input and output status of a request."""
413

    def __init__(
        self,
        rid: str,
        origin_input_text: str,
418
        origin_input_ids: List[int],
419
        sampling_params: SamplingParams,
        return_logprob: bool = False,
        top_logprobs_num: int = 0,
422
        token_ids_logprob: List[int] = None,
Lianmin Zheng's avatar
Lianmin Zheng committed
423
        stream: bool = False,
424
        origin_input_ids_unpadded: Optional[Tuple[int]] = None,
425
        lora_id: Optional[str] = None,
Rin Intachuen's avatar
Rin Intachuen committed
426
        input_embeds: Optional[List[List[float]]] = None,
woodx's avatar
woodx committed
427
        token_type_ids: List[int] = None,
428
        session_id: Optional[str] = None,
429
        custom_logit_processor: Optional[str] = None,
430
        return_hidden_states: bool = False,
431
        eos_token_ids: Optional[Set[int]] = None,
432
        bootstrap_host: Optional[str] = None,
433
        bootstrap_port: Optional[int] = None,
434
        bootstrap_room: Optional[int] = None,
435
        data_parallel_rank: Optional[int] = None,
436
        vocab_size: Optional[int] = None,
437
    ):
438
        # Input and output info
Lianmin Zheng's avatar
Lianmin Zheng committed
439
        self.rid = rid
Liangsheng Yin's avatar
Liangsheng Yin committed
440
        self.origin_input_text = origin_input_text
        self.origin_input_ids_unpadded = (
            origin_input_ids_unpadded
            if origin_input_ids_unpadded
            else origin_input_ids  # Before image padding
        )
Liangsheng Yin's avatar
Liangsheng Yin committed
446
        self.origin_input_ids = origin_input_ids
447
448
449
        # Each decode stage's output ids
        self.output_ids = []
        # fill_ids = origin_input_ids + output_ids. Updated if chunked.
450
        self.fill_ids = []
451
        self.session_id = session_id
Lianmin Zheng's avatar
Lianmin Zheng committed
452
        self.input_embeds = input_embeds
453

        # For cross-encoder models
        self.token_type_ids = token_type_ids

        # The length of KV that have been removed in local attention chunked prefill
        self.evicted_seqlen_local = 0

Lianmin Zheng's avatar
Lianmin Zheng committed
460
        # Sampling info
        if isinstance(sampling_params.custom_params, dict):
            sampling_params = copy.copy(sampling_params)
            sampling_params.custom_params = sampling_params.custom_params | {
                "__req__": self
            }
466
        self.sampling_params = sampling_params
467
        self.custom_logit_processor = custom_logit_processor
468
        self.return_hidden_states = return_hidden_states
469
        self.lora_id = lora_id
Liangsheng Yin's avatar
Liangsheng Yin committed
470

471
        # Memory pool info
472
        self.req_pool_idx: Optional[int] = None
473

474
475
476
        # Check finish
        self.tokenizer = None
        self.finished_reason = None
        # Whether this request has finished output
        self.finished_output = None
479
480
        # If we want to abort the request in the middle of the event loop, set this to true
        # Note: We should never set finished_reason in the middle, the req will get filtered and never respond
481
        self.to_abort = False
Lianmin Zheng's avatar
Lianmin Zheng committed
482
        # This carries the error message for `.to_abort` and will be attached to the finished_reason at the end of the event loop
Lianmin Zheng's avatar
Lianmin Zheng committed
483
        self.to_abort_message: str = None
Lianmin Zheng's avatar
Lianmin Zheng committed
484
        self.stream = stream
485
        self.eos_token_ids = eos_token_ids
486
        self.vocab_size = vocab_size
487

488
        # For incremental decoding
        # ----- | --------- read_ids -------|
        # ----- |   surr_ids  |
        # xxxxx | xxxxxxxxxxx | xxxxxxxxxxx |
        # ----- ^ ----------- ^ ----------- ^
        # ----- 1 ----------- 2 ----------- 3
        # 1: surr_offset
        # 2: read_offset
        # 3: last token
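        # Worked example (hypothetical): with 5 prompt tokens and
        # INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5, the first call to
        # init_incremental_detokenize() sets read_offset = 5 and surr_offset = 0,
        # so the detokenizer decodes a small surrounding window together with the
        # new tokens and only emits the text past read_offset.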
        self.surr_offset = None  # Surrounding offset to defeat the cleanup algorithm
        self.read_offset = None
Lianmin Zheng's avatar
Lianmin Zheng committed
499
        self.decoded_text = ""
500

501
        # For multimodal inputs
Mick's avatar
Mick committed
502
        self.multimodal_inputs: Optional[MultimodalInputs] = None
503

504
        # Prefix info
505
        # The indices to kv cache for the shared prefix.
506
        self.prefix_indices: torch.Tensor = []
507
        # Number of tokens to run prefill.
508
        self.extend_input_len = 0
509
510
        # The relative logprob_start_len in an extend batch
        self.extend_logprob_start_len = 0
511
512
513
        self.last_node: Any = None
        self.last_host_node: Any = None
        self.host_hit_length = 0
        # The node to lock until for swa radix tree lock ref
        self.swa_uuid_for_lock: Optional[int] = None
Lianmin Zheng's avatar
Lianmin Zheng committed
516

        # Whether the request is chunked. It is incremented whenever the request
        # is chunked, and decremented whenever a chunked request is processed.
        self.is_chunked = 0
521

522
523
524
        # For retraction
        self.is_retracted = False

        # Incremental streaming
        self.send_token_offset: int = 0
        self.send_decode_id_offset: int = 0
        # TODO (Byron): send_output_token_logprobs_offset and send_decode_id_offset can be different in disaggregation mode
        # because the decode server does not have the first output token logprobs
        self.send_output_token_logprobs_offset: int = 0

532
        # Logprobs (arguments)
Lianmin Zheng's avatar
Lianmin Zheng committed
533
        self.return_logprob = return_logprob
534
        # Start index to compute logprob from.
535
        self.logprob_start_len = 0
Lianmin Zheng's avatar
Lianmin Zheng committed
536
        self.top_logprobs_num = top_logprobs_num
537
        self.token_ids_logprob = token_ids_logprob
        self.temp_scaled_logprobs = False
        self.top_p_normalized_logprobs = False
540

541
        # Logprobs (return values)
542
543
        # True means the input logprob has been already sent to detokenizer.
        self.input_logprob_sent: bool = False
        self.input_token_logprobs_val: Optional[List[float]] = None
        self.input_token_logprobs_idx: Optional[List[int]] = None
        self.input_top_logprobs_val: Optional[List[float]] = None
        self.input_top_logprobs_idx: Optional[List[int]] = None
        self.input_token_ids_logprobs_val: Optional[List[float]] = None
        self.input_token_ids_logprobs_idx: Optional[List[int]] = None
        # Temporary holder to store input_token_logprobs.
        self.input_token_logprobs: Optional[List[Tuple[int]]] = None
        self.temp_input_top_logprobs_val: Optional[List[torch.Tensor]] = None
        self.temp_input_top_logprobs_idx: Optional[List[int]] = None
        self.temp_input_token_ids_logprobs_val: Optional[List[float]] = None
        self.temp_input_token_ids_logprobs_idx: Optional[List[int]] = None

        if return_logprob:
558
            # shape: (bs, 1)
            self.output_token_logprobs_val = []
            self.output_token_logprobs_idx = []
561
            # shape: (bs, k)
            self.output_top_logprobs_val = []
            self.output_top_logprobs_idx = []
            # Can contain either lists or GPU tensors (delayed copy optimization for prefill-only scoring)
            self.output_token_ids_logprobs_val: List[
                Union[List[float], torch.Tensor]
            ] = []
568
            self.output_token_ids_logprobs_idx = []
        else:
            self.output_token_logprobs_val = self.output_token_logprobs_idx = (
                self.output_top_logprobs_val
572
573
574
            ) = self.output_top_logprobs_idx = self.output_token_ids_logprobs_val = (
                self.output_token_ids_logprobs_idx
            ) = None
575
        self.hidden_states: List[List[float]] = []
576
        self.hidden_states_tensor = None  # Note: use tensor instead of list to transfer hidden_states when PD + MTP
577

578
        # Embedding (return values)
579
        self.embedding = None
Lianmin Zheng's avatar
Lianmin Zheng committed
580

581
        # Constrained decoding
582
        self.grammar: Optional[BaseGrammarObject] = None
583
        self.grammar_wait_ct = 0
Liangsheng Yin's avatar
Liangsheng Yin committed
584

585
        # The number of cached tokens that were already cached in the KV cache
586
        self.cached_tokens = 0
587
        self.already_computed = 0
588

589
590
591
        # The number of verification forward passes in the speculative decoding.
        # This is used to compute the average acceptance length per request.
        self.spec_verify_ct = 0

        # For metrics
        self.time_stats: TimeStats = TimeStats()
        self.has_log_time_stats: bool = False
        self.queue_time_start = None
        self.queue_time_end = None
598

Byron Hsu's avatar
Byron Hsu committed
599
        # For disaggregation
600
        self.bootstrap_host: str = bootstrap_host
601
        self.bootstrap_port: Optional[int] = bootstrap_port
602
        self.bootstrap_room: Optional[int] = bootstrap_room
603
        self.disagg_kv_sender: Optional[BaseKVSender] = None
Byron Hsu's avatar
Byron Hsu committed
604

605
606
607
        # For data parallel rank routing
        self.data_parallel_rank: Optional[int] = data_parallel_rank

        # the start index of the sent kv cache
        # We want to send it chunk by chunk for chunked prefill.
        # After every chunk forward, we do the following:
        # kv_send(req.input_ids[req.start_send_idx:len(req.fill_ids)])
        # start_send_idx = len(req.fill_ids)
        self.start_send_idx: int = 0

        # For overlap schedule, we delay the kv transfer until `process_batch_result_disagg_prefill` rather than `process_prefill_chunk` in non-overlap
        # This is because kv is not ready in `process_prefill_chunk`.
        # We use `tmp_end_idx` to store the end index of the kv cache to send.
        self.tmp_end_idx: int = -1
Lianmin Zheng's avatar
Lianmin Zheng committed
619
        self.metadata_buffer_index: int = -1
620

    @property
    def seqlen(self):
        return len(self.origin_input_ids) + len(self.output_ids)

    @property
    def is_prefill_only(self) -> bool:
        """Check if this request is prefill-only (no token generation needed)."""
        return self.sampling_params.max_new_tokens == 0

630
    def extend_image_inputs(self, image_inputs):
        if self.multimodal_inputs is None:
            self.multimodal_inputs = image_inputs
633
        else:
Mick's avatar
Mick committed
634
            self.multimodal_inputs.merge(image_inputs)
635

636
    def finished(self) -> bool:
Lianmin Zheng's avatar
Lianmin Zheng committed
637
        # Whether the request has reached a finish condition
638
639
        return self.finished_reason is not None

    def init_next_round_input(
        self,
        tree_cache: Optional[BasePrefixCache] = None,
    ):
644
        self.fill_ids = self.origin_input_ids + self.output_ids
645
        if tree_cache is not None:
            if isinstance(tree_cache, LoRARadixCache):
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix_with_lora_id(
                    key=LoRAKey(
                        lora_id=self.lora_id, token_ids=self.adjust_max_prefix_ids()
                    ),
                )
            else:
                (
                    self.prefix_indices,
                    self.last_node,
                    self.last_host_node,
                    self.host_hit_length,
                ) = tree_cache.match_prefix(
                    key=self.adjust_max_prefix_ids(),
                )
666
        self.extend_input_len = len(self.fill_ids) - len(self.prefix_indices)
667

668
    def adjust_max_prefix_ids(self):
669
670
        self.fill_ids = self.origin_input_ids + self.output_ids
        input_len = len(self.fill_ids)

        # FIXME: To work around some bugs in logprob computation, we need to ensure each
        # request has at least one token. Later, we can relax this requirement and use `input_len`.
        max_prefix_len = input_len - 1

        if self.sampling_params.max_new_tokens > 0:
            # Need at least one token to compute logits
            max_prefix_len = min(max_prefix_len, input_len - 1)

680
        if self.return_logprob:
681
            max_prefix_len = min(max_prefix_len, self.logprob_start_len)
682

683
        max_prefix_len = max(max_prefix_len, 0)
684
        return self.fill_ids[:max_prefix_len]
685

Liangsheng Yin's avatar
Liangsheng Yin committed
686
    # Based on https://github.com/vllm-project/vllm/blob/7a64d24aad69e4d2548aa0bf528d9fe63428ab01/vllm/transformers_utils/detokenizer.py#L194-L313
687
    def init_incremental_detokenize(self):
        first_iter = self.surr_offset is None or self.read_offset is None

        if first_iter:
            self.read_offset = len(self.origin_input_ids_unpadded)
            self.surr_offset = max(
                self.read_offset - INIT_INCREMENTAL_DETOKENIZATION_OFFSET, 0
            )

        all_ids = self.origin_input_ids_unpadded + self.output_ids
697
        return all_ids[self.surr_offset :], self.read_offset - self.surr_offset
Liangsheng Yin's avatar
Liangsheng Yin committed
698

699
    def check_finished(self):
700
        if self.finished():
701
702
            return

703
        if self.to_abort:
704
705
706
            self.finished_reason = FINISH_ABORT(
                message=self.to_abort_message,
            )
707
708
            return

Liangsheng Yin's avatar
Liangsheng Yin committed
709
        if len(self.output_ids) >= self.sampling_params.max_new_tokens:
710
711
712
            self.finished_reason = FINISH_LENGTH(
                length=self.sampling_params.max_new_tokens
            )
713
714
            return

        if self.grammar is not None:
            if self.grammar.is_terminated():
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=self.output_ids[-1])
                return

720
        last_token_id = self.output_ids[-1]
721

        if not self.sampling_params.ignore_eos:
            matched_eos = False

            # Check stop token ids
            if self.sampling_params.stop_token_ids:
                matched_eos = last_token_id in self.sampling_params.stop_token_ids
            if self.eos_token_ids:
                matched_eos |= last_token_id in self.eos_token_ids
            if self.tokenizer is not None:
                matched_eos |= last_token_id == self.tokenizer.eos_token_id
                if self.tokenizer.additional_stop_token_ids:
                    matched_eos |= (
                        last_token_id in self.tokenizer.additional_stop_token_ids
                    )
            if matched_eos:
                self.finished_reason = FINISH_MATCHED_TOKEN(matched=last_token_id)
                return
739

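        # An out-of-range token id usually means sampling produced an invalid id
        # (e.g. after NaN logits); replace it with a stop token when one is
        # available and finish the request.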
        if last_token_id > self.vocab_size or last_token_id < 0:
            if self.sampling_params.stop_token_ids:
                self.output_ids[-1] = next(iter(self.sampling_params.stop_token_ids))
            if self.eos_token_ids:
                self.output_ids[-1] = next(iter(self.eos_token_ids))
            self.finished_reason = FINISH_MATCHED_STR(matched="NaN happened")
            return

748
        # Check stop strings
        if len(self.sampling_params.stop_strs) > 0:
            tail_str = self.tokenizer.decode(
                self.output_ids[-(self.sampling_params.stop_str_max_len + 1) :]
            )

            for stop_str in self.sampling_params.stop_strs:
Liangsheng Yin's avatar
Liangsheng Yin committed
755
                if stop_str in tail_str or stop_str in self.decoded_text:
756
                    self.finished_reason = FINISH_MATCHED_STR(matched=stop_str)
757
758
                    return

759
760
761
    def reset_for_retract(self):
        self.prefix_indices = []
        self.last_node = None
Hanming Lu's avatar
Hanming Lu committed
762
        self.swa_uuid_for_lock = None
763
764
        self.extend_input_len = 0
        self.is_retracted = True
        self.input_token_logprobs = None
        self.temp_input_top_logprobs_val = None
        self.temp_input_top_logprobs_idx = None
        self.extend_logprob_start_len = 0
        self.is_chunked = 0
        self.req_pool_idx = None
771
        self.already_computed = 0
772

    def offload_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        self.kv_cache_cpu = token_to_kv_pool_allocator.get_cpu_copy(token_indices)

    def load_kv_cache(self, req_to_token_pool, token_to_kv_pool_allocator):
        token_indices = req_to_token_pool.req_to_token[
            self.req_pool_idx, : self.seqlen - 1
        ]
        token_to_kv_pool_allocator.load_cpu_copy(self.kv_cache_cpu, token_indices)
        del self.kv_cache_cpu

    def log_time_stats(self):
        # If overlap schedule, we schedule one decode batch ahead so this gets called twice.
        if self.has_log_time_stats is True:
            return

        if self.bootstrap_room is not None:
            prefix = f"Req Time Stats(rid={self.rid}, bootstrap_room={self.bootstrap_room}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        else:
            prefix = f"Req Time Stats(rid={self.rid}, input len={len(self.origin_input_ids)}, output len={len(self.output_ids)}, type={self.time_stats.get_type().value})"
        logger.info(f"{prefix}: {self.time_stats}")
        self.has_log_time_stats = True

    def set_finish_with_abort(self, error_msg: str):
        if get_tensor_model_parallel_rank() == 0:
            logger.error(f"{error_msg}, {self.rid=}")
        self.multimodal_inputs = None
        self.grammar = None
        self.origin_input_ids = [0]  # set it to one token to skip the long prefill
804
        self.return_logprob = False
        self.finished_reason = FINISH_ABORT(
            error_msg, HTTPStatus.BAD_REQUEST, "BadRequestError"
        )

Lianmin Zheng's avatar
Lianmin Zheng committed
809
    def __repr__(self):
810
        return (
811
            f"Req(rid={self.rid}, "
            f"input_ids={self.origin_input_ids}, output_ids={self.output_ids}, "
            f"{self.grammar=}, "
            f"{self.sampling_params=})"
815
        )


Lianmin Zheng's avatar
Lianmin Zheng committed
818
# Batch id
819
820
821
bid = 0


822
@dataclasses.dataclass
Byron Hsu's avatar
Byron Hsu committed
823
class ScheduleBatch(ScheduleBatchDisaggregationDecodeMixin):
824
    """Store all information of a batch on the scheduler."""
825

826
    # Request, memory pool, and cache
827
    reqs: List[Req]
828
    req_to_token_pool: ReqToTokenPool = None
829
    token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator = None
830
    tree_cache: BasePrefixCache = None
Hanming Lu's avatar
Hanming Lu committed
831
    is_hybrid: bool = False
832

833
    # Batch configs
834
    model_config: ModelConfig = None
Liangsheng Yin's avatar
Liangsheng Yin committed
835
    forward_mode: ForwardMode = None
836
    enable_overlap: bool = False
    # Tell whether the current running batch is full so that we can skip
    # the check of whether to prefill new requests.
    # This is an optimization to reduce the overhead of the prefill check.
    batch_is_full: bool = False
841

842
843
844
    # Events
    launch_done: Optional[threading.Event] = None

845
846
847
    # For chunked prefill in PP
    chunked_req: Optional[Req] = None

848
    # Sampling info
849
    sampling_info: SamplingBatchInfo = None
850
    next_batch_sampling_info: SamplingBatchInfo = None
Liangsheng Yin's avatar
Liangsheng Yin committed
851

852
    # Batched arguments to model runner
Lianmin Zheng's avatar
Lianmin Zheng committed
853
    input_ids: torch.Tensor = None  # shape: [b], int64
854
    input_embeds: torch.Tensor = None  # shape: [b, hidden_size], float32
woodx's avatar
woodx committed
855
    token_type_ids: torch.Tensor = None  # shape: [b], int64
Lianmin Zheng's avatar
Lianmin Zheng committed
856
    req_pool_indices: torch.Tensor = None  # shape: [b], int64
857
    seq_lens: torch.Tensor = None  # shape: [b], int64
858
    # The output locations of the KV cache
    out_cache_loc: torch.Tensor = None  # shape: [b], int64
    output_ids: torch.Tensor = None  # shape: [b], int64
861

862
863
864
    # For multimodal inputs
    multimodal_inputs: Optional[List] = None

865
866
    # The sum of all sequence lengths
    seq_lens_sum: int = None
867
868
    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: torch.Tensor = None  # shape: [b], int32
869

    # For DP attention
    global_num_tokens: Optional[List[int]] = None
872
    global_num_tokens_for_logprob: Optional[List[int]] = None
873
    is_extend_in_batch: bool = False
874
    can_run_dp_cuda_graph: bool = False
875
876
    tbo_split_seq_index: Optional[int] = None
    global_forward_mode: Optional[ForwardMode] = None
Ke Bao's avatar
Ke Bao committed
877

878
    # For processing logprobs
879
    return_logprob: bool = False
880
    top_logprobs_nums: Optional[List[int]] = None
881
    token_ids_logprobs: Optional[List[List[int]]] = None
882

    # For logits and logprob post processing
    temp_scaled_logprobs: bool = False
    top_p_normalized_logprobs: bool = False

887
888
889
    # For extend and mixed chunked prefill
    prefix_lens: List[int] = None
    extend_lens: List[int] = None
890
    extend_num_tokens: Optional[int] = None
891
    decoding_reqs: List[Req] = None
Lianmin Zheng's avatar
Lianmin Zheng committed
892
    extend_logprob_start_lens: List[int] = None
893
894
    # It is an empty list if logprob is not required.
    extend_input_logprob_token_ids: Optional[torch.Tensor] = None
895

Lianmin Zheng's avatar
Lianmin Zheng committed
896
    # For encoder-decoder architectures
    encoder_cached: Optional[List[bool]] = None
    encoder_lens: Optional[torch.Tensor] = None
    encoder_lens_cpu: Optional[List[int]] = None
    encoder_out_cache_loc: Optional[torch.Tensor] = None

902
903
904
    # Stream
    has_stream: bool = False

905
906
    # Has grammar
    has_grammar: bool = False
907

908
    # Device
909
910
    device: str = "cuda"

911
    # Speculative decoding
912
    spec_algorithm: SpeculativeAlgorithm = None
913
    spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]] = None
914

915
916
917
    # Whether to return hidden states
    return_hidden_states: bool = False

918
919
920
    # Whether this batch is prefill-only (no token generation needed)
    is_prefill_only: bool = False

921
    # hicache pointer for synchronizing data loading from CPU to GPU
922
    hicache_consumer_index: int = -1
923

924
    @classmethod
925
926
    def init_new(
        cls,
927
        reqs: List[Req],
928
        req_to_token_pool: ReqToTokenPool,
929
        token_to_kv_pool_allocator: BaseTokenToKVPoolAllocator,
930
931
932
        tree_cache: BasePrefixCache,
        model_config: ModelConfig,
        enable_overlap: bool,
933
        spec_algorithm: SpeculativeAlgorithm,
934
        chunked_req: Optional[Req] = None,
935
    ):
        return_logprob = any(req.return_logprob for req in reqs)

        is_hybrid = False
        if isinstance(token_to_kv_pool_allocator, SWATokenToKVPoolAllocator):
            assert (
                tree_cache is None
                or isinstance(tree_cache, SWARadixCache)
                or isinstance(tree_cache, SWAChunkCache)
            ), "SWARadixCache or SWAChunkCache is required for SWATokenToKVPoolAllocator"
            is_hybrid = True

947
948
949
        return cls(
            reqs=reqs,
            req_to_token_pool=req_to_token_pool,
950
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
951
            tree_cache=tree_cache,
Hanming Lu's avatar
Hanming Lu committed
952
            is_hybrid=is_hybrid,
953
            model_config=model_config,
954
            enable_overlap=enable_overlap,
Lianmin Zheng's avatar
Lianmin Zheng committed
955
            return_logprob=return_logprob,
956
            has_stream=any(req.stream for req in reqs),
957
            has_grammar=any(req.grammar for req in reqs),
Zhang, Liangang's avatar
Zhang, Liangang committed
958
            device=req_to_token_pool.device,
959
            spec_algorithm=spec_algorithm,
960
            return_hidden_states=any(req.return_hidden_states for req in reqs),
961
            is_prefill_only=all(req.is_prefill_only for req in reqs),
962
            chunked_req=chunked_req,
        )

965
    def batch_size(self):
966
        return len(self.reqs)
967

    def is_empty(self):
        return len(self.reqs) == 0

    def alloc_req_slots(self, num_reqs: int, reqs: Optional[List[Req]] = None):
        if isinstance(self.req_to_token_pool, HybridReqToTokenPool):
            req_pool_indices = self.req_to_token_pool.alloc(num_reqs, reqs)
        else:
            req_pool_indices = self.req_to_token_pool.alloc(num_reqs)
976
977
        if req_pool_indices is None:
            raise RuntimeError(
                "alloc_req_slots runs out of memory. "
                "Please set a smaller number for `--max-running-requests`. "
                f"{self.req_to_token_pool.available_size()=}, "
                f"{num_reqs=}, "
982
983
984
            )
        return req_pool_indices

985
    def alloc_token_slots(self, num_tokens: int, backup_state: bool = False):
Hanming Lu's avatar
Hanming Lu committed
986
        self._evict_tree_cache_if_needed(num_tokens)
Lianmin Zheng's avatar
Lianmin Zheng committed
987

988
989
990
        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

991
        out_cache_loc = self.token_to_kv_pool_allocator.alloc(num_tokens)
        if out_cache_loc is None:
            phase_str = "Prefill" if self.forward_mode.is_extend() else "Decode"
            error_msg = (
                f"{phase_str} out of memory. Try to lower your batch size.\n"
                f"Try to allocate {num_tokens} tokens.\n"
Hanming Lu's avatar
Hanming Lu committed
997
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            if self.tree_cache is not None:
                self.tree_cache.pretty_print()
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_extend(
        self,
        prefix_lens: torch.Tensor,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
        extend_num_tokens: int,
1015
        backup_state: bool = False,
Lianmin Zheng's avatar
Lianmin Zheng committed
1016
    ):
Lianmin Zheng's avatar
Lianmin Zheng committed
1017
        # Overestimate the number of tokens: assume each request needs a new page.
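        # Worked example (hypothetical numbers): with page_size = 16, 3 requests,
        # and extend_num_tokens = 100, we make sure 100 + 3 * 16 = 148 token slots
        # are free (evicting from the tree cache if needed) before calling the
        # allocator, even though the real allocation may need fewer slots.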
        num_tokens = (
            extend_num_tokens
Lianmin Zheng's avatar
Lianmin Zheng committed
1020
            + len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        )
        self._evict_tree_cache_if_needed(num_tokens)
1023

1024
1025
1026
        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_extend(
            prefix_lens, seq_lens, last_loc, extend_num_tokens
        )
1030
        if out_cache_loc is None:
            error_msg = (
                f"Prefill out of memory. Try to lower your batch size.\n"
                f"Try to allocate {extend_num_tokens} tokens.\n"
Hanming Lu's avatar
Hanming Lu committed
1034
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc

    def alloc_paged_token_slots_decode(
        self,
        seq_lens: torch.Tensor,
        last_loc: torch.Tensor,
1048
        backup_state: bool = False,
Lianmin Zheng's avatar
Lianmin Zheng committed
1049
    ):
Lianmin Zheng's avatar
Lianmin Zheng committed
1050
        # Overestimate the number of tokens: assume each request needs a new page.
        num_tokens = len(seq_lens) * self.token_to_kv_pool_allocator.page_size
        self._evict_tree_cache_if_needed(num_tokens)
1053

        if backup_state:
            state = self.token_to_kv_pool_allocator.backup_state()

        out_cache_loc = self.token_to_kv_pool_allocator.alloc_decode(seq_lens, last_loc)
        if out_cache_loc is None:
            error_msg = (
                f"Decode out of memory. Try to lower your batch size.\n"
                f"Try to allocate {len(seq_lens)} tokens.\n"
Hanming Lu's avatar
Hanming Lu committed
1062
                f"{self._available_and_evictable_str()}"
            )
            logger.error(error_msg)
            raise RuntimeError(error_msg)

        if backup_state:
            return out_cache_loc, state
        else:
            return out_cache_loc
1071

    def prepare_encoder_info_extend(self, input_ids: List[int], seq_lens: List[int]):
        self.encoder_lens_cpu = []
        self.encoder_cached = []

        for req in self.reqs:
Mick's avatar
Mick committed
1077
            im = req.multimodal_inputs
            if im is None or im.num_image_tokens is None:
                # No image input
                self.encoder_lens_cpu.append(0)
                self.encoder_cached.append(True)
            else:
                self.encoder_lens_cpu.append(im.num_image_tokens)
                self.encoder_cached.append(
                    self.forward_mode.is_decode()
                    or len(req.prefix_indices) >= im.num_image_tokens
                )

1089
        self.encoder_lens = torch.tensor(self.encoder_lens_cpu, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        # Strip encoder infos
        pt = 0
        decoder_out_cache_loc = []
        encoder_out_cache_loc = []
        for i, req in enumerate(self.reqs):
            encoder_len = self.encoder_lens_cpu[i]
            seq_lens[i] -= encoder_len

            if len(req.prefix_indices) < encoder_len:
1102
                # NOTE: the encoder part should be considered as a whole
                assert len(req.prefix_indices) == 0
                input_ids[i] = input_ids[i][encoder_len:]
                encoder_out_cache_loc.append(self.out_cache_loc[pt : pt + encoder_len])
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt + encoder_len : pt + req.extend_input_len]
                )
                self.extend_lens[i] -= encoder_len
                self.extend_num_tokens -= encoder_len
            else:
                decoder_out_cache_loc.append(
                    self.out_cache_loc[pt : pt + req.extend_input_len]
                )
                self.prefix_lens[i] -= encoder_len

            pt += req.extend_input_len

        # Reassign
Lianmin Zheng's avatar
Lianmin Zheng committed
1120
        self.input_ids = torch.tensor(sum(input_ids, []), dtype=torch.int64).to(
1121
1122
            self.device, non_blocking=True
        )
1123
        self.seq_lens = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if not decoder_out_cache_loc:
Lianmin Zheng's avatar
Lianmin Zheng committed
1128
            self.out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.out_cache_loc = torch.cat(decoder_out_cache_loc)

        if not encoder_out_cache_loc:
Lianmin Zheng's avatar
Lianmin Zheng committed
1135
            self.encoder_out_cache_loc = torch.zeros(0, dtype=torch.int64).to(
                self.device, non_blocking=True
            )
        else:
            self.encoder_out_cache_loc = torch.cat(encoder_out_cache_loc)

1141
1142
1143
        assert (
            len(self.out_cache_loc) == self.extend_num_tokens
        ), f"Expected {len(self.out_cache_loc)}, got {self.extend_num_tokens}"
1144

1145
    def prepare_for_extend(self):
        self.forward_mode = ForwardMode.EXTEND

Lianmin Zheng's avatar
Lianmin Zheng committed
1148
        # Allocate req slots
1149
        bs = len(self.reqs)
Yi Zhang's avatar
Yi Zhang committed
1150
        req_pool_indices = self.alloc_req_slots(bs, self.reqs)

        # Init tensors
Lianmin Zheng's avatar
Lianmin Zheng committed
1153
        reqs = self.reqs
1154
        input_ids = [r.fill_ids[len(r.prefix_indices) :] for r in reqs]
1155
        extend_num_tokens = sum(len(ids) for ids in input_ids)
Lianmin Zheng's avatar
Lianmin Zheng committed
1156
        seq_lens = [len(r.fill_ids) for r in reqs]
1157
        orig_seq_lens = [max(len(r.fill_ids), len(r.origin_input_ids)) for r in reqs]
        prefix_lens = [len(r.prefix_indices) for r in reqs]
        extend_lens = [r.extend_input_len for r in reqs]
Lianmin Zheng's avatar
Lianmin Zheng committed
1160

        token_type_ids = [
            r.token_type_ids for r in reqs if r.token_type_ids is not None
        ]

        req_pool_indices_tensor = torch.tensor(req_pool_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
1168
1169
1170
        input_ids_tensor = torch.tensor(
            list(chain.from_iterable(input_ids)), dtype=torch.int64
        ).to(self.device, non_blocking=True)
        seq_lens_tensor = torch.tensor(seq_lens, dtype=torch.int64).to(
            self.device, non_blocking=True
        )
1174
1175
1176
        orig_seq_lens_tensor = torch.tensor(orig_seq_lens, dtype=torch.int32).to(
            self.device, non_blocking=True
        )
        prefix_lens_tensor = torch.tensor(
            prefix_lens, dtype=torch.int64, device=self.device
        )

        token_type_ids_tensor = None
        if len(token_type_ids) > 0:
            token_type_ids_tensor = torch.tensor(
                sum(token_type_ids, []), dtype=torch.int64
            ).to(self.device, non_blocking=True)

Lianmin Zheng's avatar
Lianmin Zheng committed
1187
        extend_lens_tensor = seq_lens_tensor - prefix_lens_tensor
1188

Lianmin Zheng's avatar
Lianmin Zheng committed
1189
        # Copy prefix and do some basic check
Rin Intachuen's avatar
Rin Intachuen committed
1190
        input_embeds = []
1191
        extend_input_logprob_token_ids = []
1192
        multimodal_inputs = []
Rin Intachuen's avatar
Rin Intachuen committed
1193

Lianmin Zheng's avatar
Lianmin Zheng committed
1194
        for i, (req, seq_len, pre_len) in enumerate(zip(reqs, seq_lens, prefix_lens)):
1195
            req.req_pool_idx = req_pool_indices[i]
1196
            assert seq_len - pre_len == req.extend_input_len
Lianmin Zheng's avatar
Lianmin Zheng committed
1197

            if pre_len > 0:
                self.req_to_token_pool.write(
                    (req.req_pool_idx, slice(0, pre_len)), req.prefix_indices
                )
                if isinstance(self.tree_cache, SWAChunkCache):
                    self.tree_cache.evict_swa(
                        req, pre_len, self.model_config.attention_chunk_size
                    )

            # If input_embeds are available, store them
            if req.input_embeds is not None:
                # If req.input_embeds is already a list, append its content directly
                input_embeds.extend(req.input_embeds)  # Use extend to avoid nesting

            multimodal_inputs.append(req.multimodal_inputs)

            req.cached_tokens += pre_len - req.already_computed
            req.already_computed = seq_len
            req.is_retracted = False

            # Compute the relative logprob_start_len in an extend batch
            #
            # Key variables:
            # - logprob_start_len: Absolute position in full sequence where logprob computation begins
            # - extend_logprob_start_len: Relative position within current extend batch where logprob computation begins
            # - extend_input_len: Number of tokens that need to be processed in this extend batch
            #   (= len(fill_ids) - len(prefix_indices), where fill_ids = origin_input_ids + output_ids
            #    and prefix_indices are the cached/shared prefix tokens)
            #
            if req.logprob_start_len >= pre_len:
                # Optimization for prefill-only requests: When we only need logprobs at
                # positions beyond the input sequence (to score next-token likelihood), skip all
                # input logprob computation during prefill since no generation will occur.
                if self.is_prefill_only and req.logprob_start_len == len(
                    req.origin_input_ids
                ):
                    # Skip ALL input logprobs: set extend_logprob_start_len = extend_input_len
                    req.extend_logprob_start_len = req.extend_input_len
                else:
                    # Convert absolute logprob_start_len to relative extend_logprob_start_len
                    #
                    # Example: origin_input_ids=[1,2,3,4,5] (5 tokens, positions 0-4), logprob_start_len=3
                    # Regular logic: min(3-0, 5, 5-1) = min(3,5,4) = 3
                    # This means: "compute logprobs from position 3 onwards in extend batch"
                    req.extend_logprob_start_len = min(
                        req.logprob_start_len - pre_len,
                        req.extend_input_len,
                        req.seqlen - 1,
                    )
            else:
                # logprob_start_len is before the current extend batch, so start from beginning
                req.extend_logprob_start_len = 0

            if self.return_logprob:
                # Find input logprob token ids.
                # First, find a global index within origin_input_ids and shift it by 1
                # to compute input logprobs, since the next token is needed to
                # compute each input logprob. E.g., (chunk size 2)
                #
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [1, 2]
                # extend_input_logprob_token_id = [2, 3]
                #
                # Note that it can also overflow. In this case, we pad it with 0.
                # input_logprobs = [1, 2, 3, 4]
                # fill_ids = [3, 4]
                # extend_input_logprob_token_id = [4, 0]
                global_start_idx, global_end_idx = (
                    len(req.prefix_indices),
                    len(req.fill_ids),
                )
                # Apply logprob_start_len
                if global_start_idx < req.logprob_start_len:
                    global_start_idx = req.logprob_start_len

                logprob_token_ids = req.origin_input_ids[
                    global_start_idx + 1 : global_end_idx + 1
                ]
                extend_input_logprob_token_ids.extend(logprob_token_ids)

                # We will need req.extend_input_len - req.extend_logprob_start_len number of
                # tokens, and logprob_token_ids is for input logprob, so pad the rest of them by 0.
                extend_input_logprob_token_ids.extend(
                    [0]
                    * (
                        req.extend_input_len
                        - req.extend_logprob_start_len
                        - len(logprob_token_ids)
                    )
                )

        if self.return_logprob:
            extend_input_logprob_token_ids = torch.tensor(
                extend_input_logprob_token_ids
            )
        else:
            extend_input_logprob_token_ids = None

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            out_cache_loc = self.alloc_token_slots(extend_num_tokens)
        else:
            last_loc = get_last_loc(
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
            )
            out_cache_loc = self.alloc_paged_token_slots_extend(
                prefix_lens_tensor, seq_lens_tensor, last_loc, extend_num_tokens
            )

        # Set fields
        self.input_ids = input_ids_tensor
        self.req_pool_indices = req_pool_indices_tensor
        self.seq_lens = seq_lens_tensor
        self.orig_seq_lens = orig_seq_lens_tensor
        self.out_cache_loc = out_cache_loc
        self.input_embeds = (
            torch.tensor(input_embeds).to(self.device, non_blocking=True)
            if input_embeds
            else None
        )
        for mm_input in multimodal_inputs:
            if mm_input is None:
                continue
            for mm_item in mm_input.mm_items:
                pixel_values = getattr(mm_item, "feature", None)
                if isinstance(pixel_values, torch.Tensor):
                    mm_item.feature = pixel_values.to(self.device, non_blocking=True)
        self.multimodal_inputs = multimodal_inputs
        self.token_type_ids = token_type_ids_tensor
        self.seq_lens_sum = sum(seq_lens)

        if self.return_logprob:
            self.top_logprobs_nums = [r.top_logprobs_num for r in reqs]
            self.token_ids_logprobs = [r.token_ids_logprob for r in reqs]

        self.extend_logprob_start_lens = [r.extend_logprob_start_len for r in reqs]
        self.extend_num_tokens = extend_num_tokens
        self.prefix_lens = prefix_lens
        self.extend_lens = extend_lens
        self.extend_input_logprob_token_ids = extend_input_logprob_token_ids

        # Write to req_to_token_pool
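        # Illustrative mapping (comments only; k0..k2 are hypothetical KV slots):
        # for a request with prefix_len=2, seq_len=5 and extend_len=3, the code
        # below fills this request's row of the request-to-token map as
        #   req_to_token[req_pool_idx, 2:5] = out_cache_loc[pt : pt + 3]  # [k0, k1, k2]
        # while positions 0:2 were already filled from req.prefix_indices above.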
        if support_triton(global_server_args_dict.get("attention_backend")):
            # TODO: some tensors can be reused for ForwardBatchInfo (e.g., extend_lens, cumsum_start)

            write_req_to_token_pool_triton[(bs,)](
                self.req_to_token_pool.req_to_token,
                req_pool_indices_tensor,
                prefix_lens_tensor,
                seq_lens_tensor,
                extend_lens_tensor,
                out_cache_loc,
                self.req_to_token_pool.req_to_token.shape[1],
            )
        else:
            pt = 0
            for i in range(bs):
                self.req_to_token_pool.write(
                    (req_pool_indices[i], slice(prefix_lens[i], seq_lens[i])),
                    out_cache_loc[pt : pt + extend_lens[i]],
                )
                pt += extend_lens[i]

        if self.model_config.is_encoder_decoder:
            self.prepare_encoder_info_extend(input_ids, seq_lens)

        # Build sampling info
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_split_prefill(self):
        self.prepare_for_extend()
        # For split prefill, we need to set the forward mode to SPLIT_PREFILL
        self.forward_mode = ForwardMode.SPLIT_PREFILL

    def mix_with_running(self, running_batch: "ScheduleBatch"):
        self.forward_mode = ForwardMode.MIXED
        running_bs = running_batch.batch_size()

        for req in running_batch.reqs:
            req.fill_ids = req.origin_input_ids + req.output_ids
            req.extend_input_len = 1

        input_ids = torch.cat([self.input_ids, running_batch.input_ids])
        out_cache_loc = torch.cat([self.out_cache_loc, running_batch.out_cache_loc])

        self.merge_batch(running_batch)
        self.input_ids = input_ids
        self.out_cache_loc = out_cache_loc

        # For overlap scheduler, the output_ids has one step delay
        delta = 0 if self.enable_overlap else -1

        # NOTE: prefix_indices is what has been cached, but we don't cache each decode step
        self.prefix_lens.extend(
            [
                len(r.origin_input_ids) + len(r.output_ids) + delta
                for r in running_batch.reqs
            ]
        )
        self.extend_lens.extend([1] * running_bs)
        self.extend_num_tokens += running_bs
        # TODO (lianmin): Revisit this. It should be seq_len - 1
        self.extend_logprob_start_lens.extend([0] * running_bs)

    def new_page_count_next_decode(self, selected_indices: Optional[List[int]] = None):
        page_size = self.token_to_kv_pool_allocator.page_size
        requests = (
            self.reqs
            if selected_indices is None
            else [self.reqs[i] for i in selected_indices]
        )
        if page_size == 1:
            return len(requests)
        # In the decoding phase, the length of a request's KV cache should be
        # the total length of the request minus 1
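        # Worked example (illustrative): with page_size=4 and req.seqlen=9, the
        # KV cache currently holds 8 tokens ((9 - 1) % 4 == 0), so the next
        # decode token starts a new page and the request is counted; at
        # seqlen=10 it is not. Under the overlap scheduler the bookkeeping is
        # shifted by one step, hence the seqlen % page_size check instead.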
        return (
            sum(1 for req in requests if req.seqlen % page_size == 0)
            if self.enable_overlap
            else sum(1 for req in requests if (req.seqlen - 1) % page_size == 0)
        )

    def check_decode_mem(
        self, buf_multiplier=1, selected_indices: Optional[List[int]] = None
    ):
        num_tokens = (
            self.new_page_count_next_decode(selected_indices)
            * buf_multiplier
            * self.token_to_kv_pool_allocator.page_size
        )

        self._evict_tree_cache_if_needed(num_tokens)
        return self._is_available_size_sufficient(num_tokens)

    def retract_decode(self, server_args: ServerArgs):
        """Retract the decoding requests when there is not enough memory."""
        sorted_indices = list(range(len(self.reqs)))

        # TODO(lsyin): improve retraction policy for radix cache
        # For spec decoding, filter_batch API can only filter
        # requests from the back, so we can only retract from the back.
        # TODO(sang): Clean up finish path and support better retract
        # policy.
        if not server_args.speculative_algorithm:
            sorted_indices.sort(
                key=lambda i: (
                    len(self.reqs[i].output_ids),
                    -len(self.reqs[i].origin_input_ids),
                ),
                reverse=True,
            )

        retracted_reqs = []
        seq_lens_cpu = self.seq_lens.cpu().numpy()
        first_iter = True
        while first_iter or (
            not self.check_decode_mem(selected_indices=sorted_indices)
        ):
            if len(sorted_indices) == 1:
                # Corner case: only one request left
                if self.is_hybrid:
                    full_available_size = (
                        self.token_to_kv_pool_allocator.full_available_size()
                    )
                    swa_available_size = (
                        self.token_to_kv_pool_allocator.swa_available_size()
                    )
                    assert (
                        full_available_size > 0 and swa_available_size > 0
                    ), f"No space left for only one request in SWA mode {full_available_size=}, {swa_available_size=}"
                else:
                    assert (
                        self.token_to_kv_pool_allocator.available_size() > 0
                    ), f"No space left for only one request, {self.token_to_kv_pool_allocator.available_size()=}"
                break

            first_iter = False
            idx = sorted_indices.pop()
            req = self.reqs[idx]
            retracted_reqs.append(req)

            if server_args.disaggregation_mode == "decode":
                req.offload_kv_cache(
                    self.req_to_token_pool, self.token_to_kv_pool_allocator
                )

            if isinstance(self.tree_cache, ChunkCache):
                # ChunkCache does not have eviction
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)
            else:
                # TODO: apply more fine-grained retraction
                last_uncached_pos = (
                    len(req.prefix_indices) // server_args.page_size
                ) * server_args.page_size
                token_indices = self.req_to_token_pool.req_to_token[
                    req.req_pool_idx, last_uncached_pos : seq_lens_cpu[idx]
                ]
                self.token_to_kv_pool_allocator.free(token_indices)
                self.req_to_token_pool.free(req.req_pool_idx)

                # release the last node
                if self.is_hybrid:
                    self.tree_cache.dec_lock_ref(req.last_node, req.swa_uuid_for_lock)
                else:
                    self.tree_cache.dec_lock_ref(req.last_node)

            req.reset_for_retract()

            if len(retracted_reqs) == 0:
                # Corner case: only one request left
                raise ValueError(
                    "Failed to retract any request. No space left for only one request."
                )

        self.filter_batch(keep_indices=sorted_indices)

        # Reqs in batch are filtered
        total_decoded_tokens = sum(len(r.output_ids) for r in self.reqs)
        total_max_new_tokens = sum(r.sampling_params.max_new_tokens for r in self.reqs)

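        # Illustrative arithmetic (hypothetical numbers): with 2 remaining reqs,
        # total_decoded_tokens=100, retract_decode_steps=20, and
        # total_max_new_tokens=400, the new ratio is (100 + 20 * 2) / 400 = 0.35,
        # i.e. the estimate assumes each remaining request runs at least
        # retract_decode_steps more decode steps.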
        new_estimate_ratio = (
            total_decoded_tokens + global_config.retract_decode_steps * len(self.reqs)
        ) / total_max_new_tokens
        new_estimate_ratio = min(1.0, new_estimate_ratio)

        return retracted_reqs, new_estimate_ratio

    def prepare_encoder_info_decode(self):
        # Reset the encoder cached status
        self.encoder_cached = [True] * len(self.reqs)

    def prepare_for_idle(self):
        self.forward_mode = ForwardMode.IDLE
        self.input_ids = torch.empty(0, dtype=torch.int64, device=self.device)
        self.seq_lens = torch.empty(0, dtype=torch.int64, device=self.device)
        self.orig_seq_lens = torch.empty(0, dtype=torch.int32, device=self.device)
        self.out_cache_loc = torch.empty(0, dtype=torch.int64, device=self.device)
        self.req_pool_indices = torch.empty(0, dtype=torch.int32, device=self.device)
        self.seq_lens_sum = 0
        self.extend_num_tokens = 0
        self.sampling_info = SamplingBatchInfo.from_schedule_batch(
            self,
            self.model_config.vocab_size,
        )

    def prepare_for_decode(self):
        self.forward_mode = ForwardMode.DECODE
        bs = len(self.reqs)

        if self.spec_algorithm.is_eagle() or self.spec_algorithm.is_standalone():
            # if spec decoding is used, the decode batch is prepared inside
            # `forward_batch_speculative_generation` after running draft models.
            return

        if self.sampling_info.penalizer_orchestrator.is_required:
            if self.enable_overlap:
                # TODO: this can be slow, optimize this.
                delayed_output_ids = torch.tensor(
                    [
                        (
                            req.output_ids[-1]
                            if len(req.output_ids)
                            else req.origin_input_ids[-1]
                        )
                        for req in self.reqs
                    ],
                    dtype=torch.int64,
                    device=self.device,
                )
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    delayed_output_ids
                )
            else:
                self.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                    self.output_ids.to(torch.int64)
                )

        # Update fields
        self.input_ids = self.output_ids
        self.output_ids = None

        if self.model_config.is_encoder_decoder:
            locs = self.encoder_lens + self.seq_lens
            self.prepare_encoder_info_decode()
        else:
            locs = self.seq_lens.clone()

        if self.enable_overlap:
            # Do not use in-place operations in the overlap mode
            self.seq_lens = self.seq_lens + 1
            self.orig_seq_lens = self.orig_seq_lens + 1
        else:
            # A faster in-place version
            self.seq_lens.add_(1)
            self.orig_seq_lens.add_(1)
        self.seq_lens_sum += bs

        # free memory
        if isinstance(self.tree_cache, SWAChunkCache):
            for req in self.reqs:
                self.tree_cache.evict_swa(
                    req, req.seqlen - 1, self.model_config.attention_chunk_size
                )

        # Allocate memory
        if self.token_to_kv_pool_allocator.page_size == 1:
            self.out_cache_loc = self.alloc_token_slots(bs)
        else:
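            # NOTE: seq_lens was already advanced by 1 above, so the last token
            # that is actually in the KV cache sits at position seq_lens - 2.
            # Illustrative example (hypothetical): page_size=4 and seq_lens=10
            # after the increment means 9 cached tokens at positions 0..8, so
            # last_loc = req_to_token[idx, 8] and the new token at position 9
            # can reuse that page if it still has a free slot.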
            last_loc = self.req_to_token_pool.req_to_token[
                self.req_pool_indices, self.seq_lens - 2
            ]
            self.out_cache_loc = self.alloc_paged_token_slots_decode(
                self.seq_lens, last_loc
            )

        self.req_to_token_pool.write(
            (self.req_pool_indices, locs), self.out_cache_loc.to(torch.int32)
        )

    def filter_batch(
        self,
        chunked_req_to_exclude: Optional[Union[Req, List[Req]]] = None,
        keep_indices: Optional[List[int]] = None,
    ):
        if keep_indices is None:
            if isinstance(chunked_req_to_exclude, Req):
                chunked_req_to_exclude = [chunked_req_to_exclude]
            elif chunked_req_to_exclude is None:
                chunked_req_to_exclude = []
            keep_indices = [
                i
                for i in range(len(self.reqs))
                if not self.reqs[i].finished()
                and self.reqs[i] not in chunked_req_to_exclude
            ]

        if keep_indices is None or len(keep_indices) == 0:
            # Filter out all requests
            self.reqs = []
            return

        if len(keep_indices) == len(self.reqs):
            # No need to filter
            return

        keep_indices_device = torch.tensor(keep_indices, dtype=torch.int64).to(
            self.device, non_blocking=True
        )

        if self.model_config.is_encoder_decoder:
            self.encoder_lens = self.encoder_lens[keep_indices_device]
            self.encoder_lens_cpu = [self.encoder_lens_cpu[i] for i in keep_indices]

        self.reqs = [self.reqs[i] for i in keep_indices]
        if self.multimodal_inputs is not None:
            self.multimodal_inputs = [self.multimodal_inputs[i] for i in keep_indices]
        self.req_pool_indices = self.req_pool_indices[keep_indices_device]
        self.seq_lens = self.seq_lens[keep_indices_device]
        self.orig_seq_lens = self.orig_seq_lens[keep_indices_device]
        self.out_cache_loc = None
        self.seq_lens_sum = self.seq_lens.sum().item()
        self.output_ids = self.output_ids[keep_indices_device]
        self.return_logprob = any(req.return_logprob for req in self.reqs)
        if self.return_logprob:
            self.top_logprobs_nums = [self.top_logprobs_nums[i] for i in keep_indices]
            self.token_ids_logprobs = [self.token_ids_logprobs[i] for i in keep_indices]
        else:
            self.top_logprobs_nums = None
            self.token_ids_logprobs = None

        self.has_stream = any(req.stream for req in self.reqs)
        self.has_grammar = any(req.grammar for req in self.reqs)

        self.sampling_info.filter_batch(keep_indices, keep_indices_device)
        if self.spec_info:
            self.spec_info.filter_batch(keep_indices_device)

    def merge_batch(self, other: "ScheduleBatch"):
        # Penalizer orchestrator must be merged before Batch.reqs is merged. This is because
        # orchestrator.merge() depends on Batch.reqs during preparation of each penalizer, so it
        # needs to be called with pre-merged Batch.reqs.
        self.sampling_info.merge_batch(other.sampling_info)

        # Encoder-decoder infos
        if self.model_config.is_encoder_decoder:
            self.encoder_lens = torch.cat([self.encoder_lens, other.encoder_lens])
            self.encoder_lens_cpu.extend(other.encoder_lens_cpu)
        self.req_pool_indices = torch.cat(
            [self.req_pool_indices, other.req_pool_indices]
        )
        self.seq_lens = torch.cat([self.seq_lens, other.seq_lens])
        self.orig_seq_lens = torch.cat([self.orig_seq_lens, other.orig_seq_lens])
        self.out_cache_loc = None
        self.seq_lens_sum += other.seq_lens_sum
        if self.output_ids is not None:
            self.output_ids = torch.cat([self.output_ids, other.output_ids])
        if self.return_logprob and other.return_logprob:
            self.top_logprobs_nums.extend(other.top_logprobs_nums)
            self.token_ids_logprobs.extend(other.token_ids_logprobs)
        elif self.return_logprob:
            self.top_logprobs_nums.extend([0] * len(other.reqs))
            self.token_ids_logprobs.extend([None] * len(other.reqs))
        elif other.return_logprob:
            self.top_logprobs_nums = [0] * len(self.reqs) + other.top_logprobs_nums
            self.token_ids_logprobs = [None] * len(self.reqs) + other.token_ids_logprobs
        self.reqs.extend(other.reqs)
        if self.multimodal_inputs is not None:
            self.multimodal_inputs.extend(other.multimodal_inputs)

        self.return_logprob |= other.return_logprob
        self.has_stream |= other.has_stream
        self.has_grammar |= other.has_grammar
        self.return_hidden_states |= other.return_hidden_states

        if self.spec_info:
            self.spec_info.merge_batch(other.spec_info)

    def get_model_worker_batch(
        self, seq_lens_cpu_cache: Optional[torch.Tensor] = None
    ) -> ModelWorkerBatch:
        if self.forward_mode.is_decode_or_idle():
            extend_seq_lens = extend_prefix_lens = extend_logprob_start_lens = None
        else:
            extend_seq_lens = self.extend_lens
            extend_prefix_lens = self.prefix_lens
            extend_logprob_start_lens = self.extend_logprob_start_lens

        if self.sampling_info:
            if self.has_grammar:
                self.sampling_info.grammars = [req.grammar for req in self.reqs]
            else:
                self.sampling_info.grammars = None

        seq_lens_cpu = (
            seq_lens_cpu_cache
            if seq_lens_cpu_cache is not None
            else self.seq_lens.cpu()
        )

        global bid
        bid += 1
        return ModelWorkerBatch(
            bid=bid,
            forward_mode=self.forward_mode,
            input_ids=self.input_ids,
            req_pool_indices=self.req_pool_indices,
            seq_lens=self.seq_lens,
            orig_seq_lens=self.orig_seq_lens,
            out_cache_loc=self.out_cache_loc,
            seq_lens_cpu=seq_lens_cpu,
            seq_lens_sum=self.seq_lens_sum,
            return_logprob=self.return_logprob,
            top_logprobs_nums=self.top_logprobs_nums,
            token_ids_logprobs=self.token_ids_logprobs,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            is_extend_in_batch=self.is_extend_in_batch,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            tbo_split_seq_index=self.tbo_split_seq_index,
            global_forward_mode=self.global_forward_mode,
            extend_num_tokens=self.extend_num_tokens,
            extend_seq_lens=extend_seq_lens,
            extend_prefix_lens=extend_prefix_lens,
            extend_logprob_start_lens=extend_logprob_start_lens,
            multimodal_inputs=self.multimodal_inputs,
            encoder_cached=self.encoder_cached,
            encoder_lens=self.encoder_lens,
            encoder_lens_cpu=self.encoder_lens_cpu,
            encoder_out_cache_loc=self.encoder_out_cache_loc,
            lora_ids=[req.lora_id for req in self.reqs],
            sampling_info=self.sampling_info,
            input_embeds=self.input_embeds,
            token_type_ids=self.token_type_ids,
            spec_algorithm=self.spec_algorithm,
            spec_info=self.spec_info,
            hicache_consumer_index=self.hicache_consumer_index,
            capture_hidden_mode=(
                CaptureHiddenMode.FULL
                if self.return_hidden_states
                else (
                    getattr(
                        self.spec_info, "capture_hidden_mode", CaptureHiddenMode.NULL
                    )
                    if self.spec_info
                    else CaptureHiddenMode.NULL
                )
            ),
            extend_input_logprob_token_ids=self.extend_input_logprob_token_ids,
            launch_done=self.launch_done,
            is_prefill_only=self.is_prefill_only,
        )

    def copy(self):
        # Only contain fields that will be used by process_batch_result
        return ScheduleBatch(
            reqs=self.reqs,
            model_config=self.model_config,
            forward_mode=self.forward_mode,
            out_cache_loc=self.out_cache_loc,
            return_logprob=self.return_logprob,
            decoding_reqs=self.decoding_reqs,
            spec_algorithm=self.spec_algorithm,
            global_num_tokens=self.global_num_tokens,
            global_num_tokens_for_logprob=self.global_num_tokens_for_logprob,
            can_run_dp_cuda_graph=self.can_run_dp_cuda_graph,
            is_extend_in_batch=self.is_extend_in_batch,
            is_prefill_only=self.is_prefill_only,
        )

    def _evict_tree_cache_if_needed(self, num_tokens: int):
        if isinstance(self.tree_cache, (SWAChunkCache, ChunkCache)):
            return

        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()

            if full_available_size < num_tokens or swa_available_size < num_tokens:
                if self.tree_cache is not None:
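                    # Illustrative arithmetic (hypothetical sizes): if
                    # num_tokens=64, full_available_size=80 and
                    # swa_available_size=40, then full_num_tokens=0 and
                    # swa_num_tokens=24, so only the SWA side of the hybrid
                    # cache needs to evict.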
                    full_num_tokens = max(0, num_tokens - full_available_size)
                    swa_num_tokens = max(0, num_tokens - swa_available_size)
                    self.tree_cache.evict(full_num_tokens, swa_num_tokens)
        else:
            if self.token_to_kv_pool_allocator.available_size() < num_tokens:
                if self.tree_cache is not None:
                    self.tree_cache.evict(num_tokens)

    def _is_available_size_sufficient(self, num_tokens: int) -> bool:
        if self.is_hybrid:
            return (
                self.token_to_kv_pool_allocator.full_available_size() >= num_tokens
                and self.token_to_kv_pool_allocator.swa_available_size() >= num_tokens
            )
        else:
            return self.token_to_kv_pool_allocator.available_size() >= num_tokens

    def _available_and_evictable_str(self) -> str:
        if self.is_hybrid:
            full_available_size = self.token_to_kv_pool_allocator.full_available_size()
            swa_available_size = self.token_to_kv_pool_allocator.swa_available_size()
            full_evictable_size = self.tree_cache.full_evictable_size()
            swa_evictable_size = self.tree_cache.swa_evictable_size()
            return (
                f"Available full tokens: {full_available_size + full_evictable_size} ({full_available_size=} + {full_evictable_size=})\n"
                f"Available swa tokens: {swa_available_size + swa_evictable_size} ({swa_available_size=} + {swa_evictable_size=})\n"
                f"Full LRU list evictable size: {self.tree_cache.full_lru_list_evictable_size()}\n"
                f"SWA LRU list evictable size: {self.tree_cache.swa_lru_list_evictable_size()}\n"
            )
        else:
            available_size = self.token_to_kv_pool_allocator.available_size()
            evictable_size = self.tree_cache.evictable_size()
            return f"Available tokens: {available_size + evictable_size} ({available_size=} + {evictable_size=})\n"

    def __str__(self):
        return (
            f"ScheduleBatch(forward_mode={self.forward_mode.name if self.forward_mode else 'None'}, "
            f"#req={(len(self.reqs))})"
        )


@dataclasses.dataclass
class ModelWorkerBatch:
    # The batch id
    bid: int
    # The forward mode
    forward_mode: ForwardMode
    # The input ids
    input_ids: torch.Tensor
    # The indices of requests in the req_to_token_pool
    req_pool_indices: torch.Tensor
    # The sequence length
    seq_lens: torch.Tensor
    # The indices of output tokens in the token_to_kv_pool_allocator
    out_cache_loc: torch.Tensor
    # The sequence length tensor on CPU
    seq_lens_cpu: Optional[torch.Tensor]
    seq_lens_sum: int

    # For logprob
    return_logprob: bool
    top_logprobs_nums: Optional[List[int]]
    token_ids_logprobs: Optional[List[List[int]]]

    # For DP attention
    global_num_tokens: Optional[List[int]]
    global_num_tokens_for_logprob: Optional[List[int]]
    is_extend_in_batch: bool
    can_run_dp_cuda_graph: bool
    tbo_split_seq_index: Optional[int]
    global_forward_mode: Optional[ForwardMode]

    # For extend
    extend_num_tokens: Optional[int]
    extend_seq_lens: Optional[List[int]]
    extend_prefix_lens: Optional[List[int]]
    extend_logprob_start_lens: Optional[List[int]]
    extend_input_logprob_token_ids: Optional[torch.Tensor]

    # For multimodal
    multimodal_inputs: Optional[List[MultimodalInputs]]

    # For encoder-decoder
    encoder_cached: Optional[List[bool]]
    encoder_lens: Optional[torch.Tensor]
    encoder_lens_cpu: Optional[List[int]]
    encoder_out_cache_loc: Optional[torch.Tensor]

    # For LoRA
    lora_ids: Optional[List[str]]

    # Sampling info
    sampling_info: SamplingBatchInfo

    # The original sequence lengths, Qwen-1M related
    orig_seq_lens: Optional[torch.Tensor] = None

    # The input Embeds
    input_embeds: Optional[torch.Tensor] = None

    # For cross-encoder model
    token_type_ids: Optional[torch.Tensor] = None

    # Speculative decoding
    spec_algorithm: SpeculativeAlgorithm = None
    spec_info: Optional[Union[EagleVerifyInput, EagleDraftInput]] = None
    # If set, the output of the batch contains the hidden states of the run.
    capture_hidden_mode: CaptureHiddenMode = None
    hicache_consumer_index: int = -1

    # Overlap event
    launch_done: Optional[threading.Event] = None

    # Whether this batch is prefill-only (no token generation needed)
    is_prefill_only: bool = False


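# Reference semantics of the Triton kernel below (illustrative sketch only, not
# used by the scheduler): for each request, copy its newly allocated KV indices
# into its row of the request-to-token map, starting right after the prefix.
#
#   pt = 0
#   for i in range(bs):
#       row, pre, seq = req_pool_indices[i], pre_lens[i], seq_lens[i]
#       req_to_token[row, pre:seq] = out_cache_loc[pt : pt + (seq - pre)]
#       pt += extend_lens[i]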
@triton.jit
def write_req_to_token_pool_triton(
    req_to_token_ptr,  # [max_batch, max_context_len]
    req_pool_indices,
    pre_lens,
    seq_lens,
    extend_lens,
    out_cache_loc,
    req_to_token_ptr_stride: tl.constexpr,
):
    BLOCK_SIZE: tl.constexpr = 512
    pid = tl.program_id(0)

    req_pool_index = tl.load(req_pool_indices + pid)
    pre_len = tl.load(pre_lens + pid)
    seq_len = tl.load(seq_lens + pid)

    # NOTE: This can be slow for large bs
    cumsum_start = tl.cast(0, tl.int64)
    for i in range(pid):
        cumsum_start += tl.load(extend_lens + i)

    num_loop = tl.cdiv(seq_len - pre_len, BLOCK_SIZE)
    for i in range(num_loop):
        offset = tl.arange(0, BLOCK_SIZE) + i * BLOCK_SIZE
        mask = offset < (seq_len - pre_len)
        value = tl.load(out_cache_loc + cumsum_start + offset, mask=mask)
        tl.store(
            req_to_token_ptr
            + req_pool_index * req_to_token_ptr_stride
            + offset
            + pre_len,
            value,
            mask=mask,
        )


def get_last_loc(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    if (
        global_server_args_dict["attention_backend"] != "ascend"
        and global_server_args_dict["attention_backend"] != "torch_native"
    ):
        impl = get_last_loc_triton
    else:
        impl = get_last_loc_torch

    return impl(req_to_token, req_pool_indices_tensor, prefix_lens_tensor)


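# Illustrative behavior (assumed inputs): with prefix_lens_tensor = [0, 3] and
# req_pool_indices_tensor = [5, 7], get_last_loc_torch returns
# [-1, req_to_token[7, 2]], i.e. -1 for a request without a cached prefix and
# otherwise the KV slot of the last cached prefix token.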
def get_last_loc_torch(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    return torch.where(
        prefix_lens_tensor > 0,
        req_to_token[req_pool_indices_tensor, prefix_lens_tensor - 1],
        torch.full_like(prefix_lens_tensor, -1),
    )


@triton.jit
def get_last_loc_kernel(
    req_to_token,
    req_pool_indices_tensor,
    prefix_lens_tensor,
    result,
    num_tokens,
    req_to_token_stride,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(0)
    offset = tl.arange(0, BLOCK_SIZE) + pid * BLOCK_SIZE
    mask = offset < num_tokens

    prefix_lens = tl.load(prefix_lens_tensor + offset, mask=mask, other=0)
    req_pool_indices = tl.load(req_pool_indices_tensor + offset, mask=mask, other=0)

    token_mask = prefix_lens > 0
    token_index = req_pool_indices * req_to_token_stride + (prefix_lens - 1)
    tokens = tl.load(req_to_token + token_index, mask=token_mask, other=-1)

    tl.store(result + offset, tokens, mask=mask)


def get_last_loc_triton(
    req_to_token: torch.Tensor,
    req_pool_indices_tensor: torch.Tensor,
    prefix_lens_tensor: torch.Tensor,
) -> torch.Tensor:
    BLOCK_SIZE = 256
    num_tokens = prefix_lens_tensor.shape[0]
    result = torch.empty_like(prefix_lens_tensor)
    grid = (triton.cdiv(num_tokens, BLOCK_SIZE),)

    get_last_loc_kernel[grid](
        req_to_token,
        req_pool_indices_tensor,
        prefix_lens_tensor,
        result,
        num_tokens,
        req_to_token.stride(0),
        BLOCK_SIZE,
    )
    return result
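

# Usage sketch (illustrative): both implementations are interchangeable, e.g.
#   last_loc = get_last_loc(
#       req_to_token_pool.req_to_token, req_pool_indices_tensor, prefix_lens_tensor
#   )
# returns one entry per request: the KV-cache index of the last prefix token,
# or -1 for requests with an empty prefix. prepare_for_extend passes this to
# alloc_paged_token_slots_extend so paged allocation can continue the current
# page where possible.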