# Copyright 2023-2024 SGLang Team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A scheduler that manages a tensor parallel GPU worker.""" import faulthandler import logging import os import signal import sys import threading import time from collections import deque from concurrent import futures from dataclasses import dataclass from http import HTTPStatus from types import SimpleNamespace from typing import Dict, List, Optional, Tuple, Union import psutil import setproctitle import torch import zmq from torch.distributed import barrier from sglang.global_config import global_config from sglang.srt.configs.model_config import ModelConfig from sglang.srt.constrained.base_grammar_backend import ( INVALID_GRAMMAR_OBJ, create_grammar_backend, ) from sglang.srt.disaggregation.decode import ( DecodePreallocQueue, DecodeTransferQueue, SchedulerDisaggregationDecodeMixin, ) from sglang.srt.disaggregation.prefill import ( PrefillBootstrapQueue, SchedulerDisaggregationPrefillMixin, ) from sglang.srt.disaggregation.utils import ( DisaggregationMode, MetadataBuffers, ReqToMetadataIdxAllocator, TransferBackend, prepare_abort, ) from sglang.srt.distributed import get_pp_group, get_world_group from sglang.srt.eplb.expert_distribution import get_global_expert_distribution_recorder from sglang.srt.hf_transformers_utils import ( get_processor, get_tokenizer, get_tokenizer_from_processor, ) from sglang.srt.layers.dp_attention import compute_dp_attention_world_info from sglang.srt.layers.logits_processor import LogitsProcessorOutput from sglang.srt.layers.moe.utils import DeepEPMode, MoeA2ABackend from sglang.srt.managers.io_struct import ( AbortReq, CloseSessionReqInput, ExpertDistributionReq, ExpertDistributionReqOutput, FlushCacheReqInput, FlushCacheReqOutput, GetInternalStateReq, GetInternalStateReqOutput, GetWeightsByNameReqInput, HealthCheckOutput, InitWeightsUpdateGroupReqInput, LoadLoRAAdapterReqInput, LoadLoRAAdapterReqOutput, OpenSessionReqInput, OpenSessionReqOutput, ProfileReq, ReleaseMemoryOccupationReqInput, ResumeMemoryOccupationReqInput, RpcReqInput, RpcReqOutput, SetInternalStateReq, SetInternalStateReqOutput, SlowDownReqInput, SlowDownReqOutput, TokenizedEmbeddingReqInput, TokenizedGenerateReqInput, UnloadLoRAAdapterReqInput, UnloadLoRAAdapterReqOutput, UpdateWeightFromDiskReqInput, UpdateWeightsFromDistributedReqInput, UpdateWeightsFromTensorReqInput, ) from sglang.srt.managers.mm_utils import init_embedding_cache from sglang.srt.managers.schedule_batch import ( FINISH_ABORT, MultimodalInputs, Req, ScheduleBatch, global_server_args_dict, ) from sglang.srt.managers.schedule_policy import ( AddReqResult, PrefillAdder, SchedulePolicy, ) from sglang.srt.managers.scheduler_input_blocker import SchedulerInputBlocker from sglang.srt.managers.scheduler_metrics_mixin import ( RECORD_STEP_TIME, SchedulerMetricsMixin, ) from sglang.srt.managers.scheduler_output_processor_mixin import ( SchedulerOutputProcessorMixin, ) from 
sglang.srt.managers.scheduler_profiler_mixin import SchedulerProfilerMixin from sglang.srt.managers.scheduler_recv_skipper import SchedulerRecvSkipper from sglang.srt.managers.scheduler_update_weights_mixin import ( SchedulerUpdateWeightsMixin, ) from sglang.srt.managers.session_controller import Session from sglang.srt.managers.tp_worker import TpModelWorker from sglang.srt.managers.tp_worker_overlap_thread import TpModelWorkerClient from sglang.srt.managers.utils import DPBalanceMeta, validate_input_length from sglang.srt.mem_cache.chunk_cache import ChunkCache, SWAChunkCache from sglang.srt.mem_cache.hiradix_cache import HiRadixCache from sglang.srt.mem_cache.radix_cache import RadixCache from sglang.srt.mem_cache.swa_radix_cache import SWARadixCache from sglang.srt.model_executor.forward_batch_info import ForwardMode, PPProxyTensors from sglang.srt.reasoning_parser import ReasoningParser from sglang.srt.server_args import PortArgs, ServerArgs from sglang.srt.speculative.spec_info import SpeculativeAlgorithm from sglang.srt.torch_memory_saver_adapter import TorchMemorySaverAdapter from sglang.srt.two_batch_overlap import TboDPAttentionPreparer from sglang.srt.utils import ( DynamicGradMode, broadcast_pyobj, configure_gc_logger, configure_logger, disable_request_logging, get_available_gpu_memory, get_bool_env_var, get_zmq_socket, is_cpu, kill_itself_when_parent_died, point_to_point_pyobj, pyspy_dump_schedulers, require_mlp_sync, require_mlp_tp_gather, set_gpu_proc_affinity, set_random_seed, suppress_other_loggers, ) from sglang.utils import TypeBasedDispatcher, get_exception_traceback logger = logging.getLogger(__name__) # Test retract decode for debugging purposes TEST_RETRACT = get_bool_env_var("SGLANG_TEST_RETRACT") GRAMMAR_TIMEOUT = float(os.environ.get("SGLANG_GRAMMAR_TIMEOUT", 300)) _is_cpu = is_cpu() @dataclass class GenerationBatchResult: logits_output: Optional[LogitsProcessorOutput] pp_hidden_states_proxy_tensors: Optional[torch.Tensor] next_token_ids: Optional[List[int]] extend_input_len_per_req: List[int] extend_logprob_start_len_per_req: List[int] bid: int can_run_cuda_graph: bool @dataclass class EmbeddingBatchResult: embeddings: torch.Tensor bid: int class Scheduler( SchedulerOutputProcessorMixin, SchedulerUpdateWeightsMixin, SchedulerProfilerMixin, SchedulerMetricsMixin, SchedulerDisaggregationDecodeMixin, SchedulerDisaggregationPrefillMixin, ): """A scheduler that manages a tensor parallel GPU worker.""" def __init__( self, server_args: ServerArgs, port_args: PortArgs, gpu_id: int, tp_rank: int, moe_ep_rank: int, pp_rank: int, dp_rank: Optional[int], dp_balance_meta: Optional[DPBalanceMeta] = None, ): # Parse args self.server_args = server_args self.tp_rank = tp_rank self.moe_ep_rank = moe_ep_rank self.pp_rank = pp_rank self.dp_rank = dp_rank self.tp_size = server_args.tp_size self.moe_ep_size = server_args.ep_size self.pp_size = server_args.pp_size self.dp_size = server_args.dp_size self.schedule_policy = server_args.schedule_policy self.enable_lora = server_args.enable_lora self.max_loras_per_batch = server_args.max_loras_per_batch self.enable_overlap = not server_args.disable_overlap_schedule self.skip_tokenizer_init = server_args.skip_tokenizer_init self.enable_metrics = server_args.enable_metrics self.enable_metrics_for_all_schedulers = ( server_args.enable_metrics_for_all_schedulers ) self.enable_kv_cache_events = server_args.kv_events_config is not None self.stream_interval = server_args.stream_interval self.spec_algorithm = SpeculativeAlgorithm.from_string( 
server_args.speculative_algorithm ) self.gpu_id = gpu_id self.enable_hierarchical_cache = server_args.enable_hierarchical_cache self.enable_hicache_storage = server_args.hicache_storage_backend is not None self.page_size = server_args.page_size self.attn_tp_rank, self.attn_tp_size, self.attn_dp_rank = ( compute_dp_attention_world_info( server_args.enable_dp_attention, self.tp_rank, self.tp_size, self.dp_size, ) ) # Init inter-process communication context = zmq.Context(2) self.idle_sleeper = None if self.pp_rank == 0 and self.attn_tp_rank == 0: self.recv_from_tokenizer = get_zmq_socket( context, zmq.PULL, port_args.scheduler_input_ipc_name, False ) self.recv_from_rpc = get_zmq_socket( context, zmq.DEALER, port_args.rpc_ipc_name, False ) self.send_to_tokenizer = get_zmq_socket( context, zmq.PUSH, port_args.tokenizer_ipc_name, False ) if server_args.skip_tokenizer_init: # Directly send to the TokenizerManager self.send_to_detokenizer = get_zmq_socket( context, zmq.PUSH, port_args.tokenizer_ipc_name, False ) else: # Send to the DetokenizerManager self.send_to_detokenizer = get_zmq_socket( context, zmq.PUSH, port_args.detokenizer_ipc_name, False ) if self.server_args.sleep_on_idle: self.idle_sleeper = IdleSleeper( [ self.recv_from_tokenizer, self.recv_from_rpc, ] ) else: self.recv_from_tokenizer = None self.recv_from_rpc = None self.send_to_tokenizer = SimpleNamespace(send_pyobj=lambda x: None) self.send_to_detokenizer = SimpleNamespace(send_pyobj=lambda x: None) if self.current_scheduler_metrics_enabled(): self.send_metrics_from_scheduler = get_zmq_socket( context, zmq.PUSH, port_args.metrics_ipc_name, False ) # Init tokenizer self.init_tokenizer() # Set reasoning_parser and think_end_id if --reasoning_parser is enabled if self.server_args.reasoning_parser and self.tokenizer: reasoning_parser = ReasoningParser( model_type=self.server_args.reasoning_parser, stream_reasoning=False ) self.tokenizer.think_end_id = self.tokenizer.encode( reasoning_parser.detector.think_end_token, add_special_tokens=False )[0] # Check whether overlap can be enabled if not self.is_generation: self.enable_overlap = False logger.info("Overlap scheduler is disabled for embedding models.") # Launch a tensor parallel worker if self.enable_overlap: TpWorkerClass = TpModelWorkerClient else: TpWorkerClass = TpModelWorker self.tp_worker = TpWorkerClass( server_args=server_args, gpu_id=gpu_id, tp_rank=tp_rank, moe_ep_rank=moe_ep_rank, pp_rank=pp_rank, dp_rank=dp_rank, nccl_port=port_args.nccl_port, ) # Launch a draft worker for speculative decoding if self.spec_algorithm.is_eagle(): from sglang.srt.speculative.eagle_worker import EAGLEWorker self.draft_worker = EAGLEWorker( gpu_id=gpu_id, tp_rank=tp_rank, moe_ep_rank=moe_ep_rank, server_args=server_args, nccl_port=port_args.nccl_port, target_worker=self.tp_worker, dp_rank=dp_rank, ) else: self.draft_worker = None # Get token and memory info from the model worker ( self.max_total_num_tokens, self.max_prefill_tokens, self.max_running_requests, self.max_queued_requests, self.max_req_len, self.max_req_input_len, self.random_seed, self.device, worker_global_server_args_dict, _, _, _, ) = self.tp_worker.get_worker_info() if global_server_args_dict["max_micro_batch_size"] is None: global_server_args_dict["max_micro_batch_size"] = max( self.max_running_requests // server_args.pp_size, 1 ) self.tp_group = self.tp_worker.get_tp_group() self.tp_cpu_group = self.tp_group.cpu_group self.attn_tp_group = self.tp_worker.get_attention_tp_group() self.attn_tp_cpu_group = 
self.tp_worker.get_attention_tp_cpu_group() self.pp_group = get_pp_group() self.world_group = get_world_group() self.pad_input_ids_func = self.tp_worker.get_pad_input_ids_func() global_server_args_dict.update(worker_global_server_args_dict) set_random_seed(self.random_seed) # Hybrid memory pool self.is_hybrid = self.tp_worker.is_hybrid if self.is_hybrid: self.sliding_window_size = self.tp_worker.sliding_window_size self.full_tokens_per_layer, self.swa_tokens_per_layer = ( self.tp_worker.get_tokens_per_layer_info() ) # Print debug info if tp_rank == 0: avail_mem = get_available_gpu_memory( self.device, self.gpu_id, empty_cache=False ) logger.info( f"max_total_num_tokens={self.max_total_num_tokens}, " f"chunked_prefill_size={server_args.chunked_prefill_size}, " f"max_prefill_tokens={self.max_prefill_tokens}, " f"max_running_requests={self.max_running_requests}, " f"context_len={self.model_config.context_len}, " f"available_gpu_mem={avail_mem:.2f} GB" ) # Init memory pool and cache self.init_memory_pool_and_cache() # Init running status self.waiting_queue: List[Req] = [] # The running decoding batch for continuous batching self.running_batch: ScheduleBatch = ScheduleBatch(reqs=[], batch_is_full=False) # The current forward batch self.cur_batch: Optional[ScheduleBatch] = None # The last forward batch self.last_batch: Optional[ScheduleBatch] = None self.forward_ct = 0 self.forward_ct_decode = 0 self.num_generated_tokens = 0 self.last_prefill_tokens = 0 self.last_decode_stats_tic = time.perf_counter() self.last_prefill_stats_tic = time.perf_counter() self.return_health_check_ct = 0 self.num_retracted_reqs: int = 0 self.num_paused_reqs: int = 0 self.kv_transfer_speed_gb_s: float = 0.0 self.kv_transfer_latency_ms: float = 0.0 self.sessions: Dict[str, Session] = {} self.current_stream = torch.get_device_module(self.device).current_stream() if self.device == "cpu": self.current_stream.synchronize = lambda: None # No-op for CPU self.forward_sleep_time = None # Init chunked prefill self.chunked_prefill_size = server_args.chunked_prefill_size if self.chunked_prefill_size <= 0: # -1 means disable self.chunked_prefill_size = None self.chunked_req = None self.is_mixed_chunk = ( self.chunked_prefill_size is not None and server_args.enable_mixed_chunk ) # Init the grammar backend for constrained generation self.grammar_queue: List[Req] = [] if not server_args.skip_tokenizer_init: self.grammar_backend = create_grammar_backend( server_args, self.tokenizer, self.model_config.vocab_size, self.model_config.hf_eos_token_id, ) else: self.grammar_backend = None # Init schedule policy and new token estimation self.policy = SchedulePolicy( self.schedule_policy, self.tree_cache, self.enable_hierarchical_cache, ) assert ( server_args.schedule_conservativeness >= 0 ), "Invalid schedule_conservativeness" self.init_new_token_ratio = min( global_config.default_init_new_token_ratio * server_args.schedule_conservativeness, 1.0, ) self.min_new_token_ratio = min( self.init_new_token_ratio * global_config.default_min_new_token_ratio_factor, 1.0, ) self.new_token_ratio_decay = ( self.init_new_token_ratio - self.min_new_token_ratio ) / global_config.default_new_token_ratio_decay_steps self.new_token_ratio = self.init_new_token_ratio # Init watchdog thread self.watchdog_timeout = server_args.watchdog_timeout t = threading.Thread(target=self.watchdog_thread, daemon=True) t.start() self.parent_process = psutil.Process().parent() # Init memory saver, profiler and metric stats self.memory_saver_adapter = 
TorchMemorySaverAdapter.create( enable=server_args.enable_memory_saver ) self.init_profier() self.recv_skipper = SchedulerRecvSkipper.maybe_create(server_args) self.input_blocker = ( SchedulerInputBlocker(noop=self.attn_tp_rank != 0) if get_bool_env_var("SGLANG_ENABLE_COLOCATED_BATCH_GEN") else None ) # Init metrics stats self.init_metrics(tp_rank, pp_rank, dp_rank) self.init_kv_events(server_args.kv_events_config) # Init disaggregation self.disaggregation_mode = DisaggregationMode( self.server_args.disaggregation_mode ) self.init_disaggregation() if get_bool_env_var("SGLANG_GC_LOG"): configure_gc_logger() # Init request dispatcher self._request_dispatcher = TypeBasedDispatcher( [ (TokenizedGenerateReqInput, self.handle_generate_request), (TokenizedEmbeddingReqInput, self.handle_embedding_request), (FlushCacheReqInput, self.flush_cache_wrapped), (AbortReq, self.abort_request), (OpenSessionReqInput, self.open_session), (CloseSessionReqInput, self.close_session), (UpdateWeightFromDiskReqInput, self.update_weights_from_disk), (InitWeightsUpdateGroupReqInput, self.init_weights_update_group), ( UpdateWeightsFromDistributedReqInput, self.update_weights_from_distributed, ), (UpdateWeightsFromTensorReqInput, self.update_weights_from_tensor), (GetWeightsByNameReqInput, self.get_weights_by_name), (ReleaseMemoryOccupationReqInput, self.release_memory_occupation), (ResumeMemoryOccupationReqInput, self.resume_memory_occupation), (SlowDownReqInput, self.slow_down), (ProfileReq, self.profile), (GetInternalStateReq, self.get_internal_state), (SetInternalStateReq, self.set_internal_state), (RpcReqInput, self.handle_rpc_request), (ExpertDistributionReq, self.expert_distribution_handle), (LoadLoRAAdapterReqInput, self.load_lora_adapter), (UnloadLoRAAdapterReqInput, self.unload_lora_adapter), ] ) self.balance_meta = dp_balance_meta if ( server_args.enable_dp_attention and server_args.load_balance_method == "minimum_tokens" ): assert dp_balance_meta is not None self.recv_dp_balance_id_this_term = [] def init_tokenizer(self): server_args = self.server_args self.model_config = ModelConfig.from_server_args(server_args) self.is_generation = self.model_config.is_generation if server_args.skip_tokenizer_init: self.tokenizer = self.processor = None else: if self.model_config.is_multimodal: self.processor = get_processor( server_args.tokenizer_path, tokenizer_mode=server_args.tokenizer_mode, trust_remote_code=server_args.trust_remote_code, revision=server_args.revision, use_fast=not server_args.disable_fast_image_processor, ) self.tokenizer = get_tokenizer_from_processor(self.processor) else: self.tokenizer = get_tokenizer( server_args.tokenizer_path, tokenizer_mode=server_args.tokenizer_mode, trust_remote_code=server_args.trust_remote_code, revision=server_args.revision, ) def init_memory_pool_and_cache(self): server_args = self.server_args self.req_to_token_pool, self.token_to_kv_pool_allocator = ( self.tp_worker.get_memory_pool() ) if ( server_args.chunked_prefill_size is not None and server_args.disable_radix_cache ): if self.is_hybrid: ChunkCacheClass = SWAChunkCache else: ChunkCacheClass = ChunkCache self.tree_cache = ChunkCacheClass( req_to_token_pool=self.req_to_token_pool, token_to_kv_pool_allocator=self.token_to_kv_pool_allocator, page_size=self.page_size, ) else: if os.environ.get("SGLANG_EXPERIMENTAL_CPP_RADIX_TREE") == "1": # lazy import to avoid JIT overhead from sglang.srt.mem_cache.radix_cache_cpp import RadixCacheCpp self.tree_cache = RadixCacheCpp( disable=False, 
use_hicache=self.enable_hierarchical_cache, req_to_token_pool=self.req_to_token_pool, token_to_kv_pool=self.token_to_kv_pool_allocator, tp_cache_group=self.tp_cpu_group, page_size=self.page_size, hicache_ratio=server_args.hicache_ratio, hicache_size=server_args.hicache_size, hicache_write_policy=server_args.hicache_write_policy, enable_kv_cache_events=self.enable_kv_cache_events, ) elif self.enable_hierarchical_cache: self.tree_cache = HiRadixCache( req_to_token_pool=self.req_to_token_pool, token_to_kv_pool_allocator=self.token_to_kv_pool_allocator, tp_cache_group=( self.attn_tp_cpu_group if self.server_args.enable_dp_attention else self.tp_cpu_group ), page_size=self.page_size, hicache_ratio=server_args.hicache_ratio, hicache_size=server_args.hicache_size, hicache_write_policy=server_args.hicache_write_policy, hicache_io_backend=( "direct" if server_args.attention_backend == "fa3" # hot fix for incompatibility else server_args.hicache_io_backend ), hicache_mem_layout=server_args.hicache_mem_layout, hicache_storage_backend=server_args.hicache_storage_backend, hicache_storage_prefetch_policy=server_args.hicache_storage_prefetch_policy, ) self.tp_worker.register_hicache_layer_transfer_counter( self.tree_cache.cache_controller.layer_done_counter ) elif self.is_hybrid: assert ( self.server_args.disaggregation_mode == "null" ), "Hybrid mode does not support disaggregation yet" self.tree_cache = SWARadixCache( req_to_token_pool=self.req_to_token_pool, token_to_kv_pool_allocator=self.token_to_kv_pool_allocator, sliding_window_size=self.sliding_window_size, page_size=self.page_size, disable=server_args.disable_radix_cache, ) else: self.tree_cache = RadixCache( req_to_token_pool=self.req_to_token_pool, token_to_kv_pool_allocator=self.token_to_kv_pool_allocator, page_size=self.page_size, disable=server_args.disable_radix_cache, enable_kv_cache_events=self.enable_kv_cache_events, ) self.decode_mem_cache_buf_multiplier = ( 1 if self.spec_algorithm.is_none() else ( server_args.speculative_num_draft_tokens + ( server_args.speculative_eagle_topk * server_args.speculative_num_steps ) ) ) embedding_cache_size = int(os.environ.get("SGLANG_VLM_CACHE_SIZE_MB", "100")) init_embedding_cache(embedding_cache_size * 1024 * 1024) def init_disaggregation(self): self.transfer_backend = TransferBackend( self.server_args.disaggregation_transfer_backend ) if ( self.disaggregation_mode == DisaggregationMode.DECODE ): # *2 for the headroom. 
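            # (Presumed rationale for the doubling: metadata slots can still be held by
            # in-flight KV transfers, so the extra headroom keeps new preallocations from stalling.)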
buffer_size = (self.req_to_token_pool.size) * 2 self.req_to_metadata_buffer_idx_allocator = ReqToMetadataIdxAllocator( buffer_size ) self.disagg_metadata_buffers = MetadataBuffers( buffer_size, hidden_size=self.model_config.hf_text_config.hidden_size, dtype=self.model_config.dtype, custom_mem_pool=self.token_to_kv_pool_allocator.get_kvcache().maybe_get_custom_mem_pool(), ) # The decode requests polling kv cache self.disagg_decode_transfer_queue = DecodeTransferQueue( gloo_group=self.attn_tp_cpu_group, req_to_metadata_buffer_idx_allocator=self.req_to_metadata_buffer_idx_allocator, tp_rank=self.tp_rank, metadata_buffers=self.disagg_metadata_buffers, scheduler=self, tree_cache=self.tree_cache, ) # The decode requests pending for pre-allocation self.disagg_decode_prealloc_queue = DecodePreallocQueue( req_to_token_pool=self.req_to_token_pool, token_to_kv_pool_allocator=self.token_to_kv_pool_allocator, draft_token_to_kv_pool=( None if self.draft_worker is None else self.draft_worker.model_runner.token_to_kv_pool ), req_to_metadata_buffer_idx_allocator=self.req_to_metadata_buffer_idx_allocator, metadata_buffers=self.disagg_metadata_buffers, scheduler=self, transfer_queue=self.disagg_decode_transfer_queue, tree_cache=self.tree_cache, gloo_group=self.attn_tp_cpu_group, tp_rank=self.tp_rank, tp_size=self.tp_size, dp_size=self.server_args.dp_size, gpu_id=self.gpu_id, bootstrap_port=self.server_args.disaggregation_bootstrap_port, max_total_num_tokens=self.max_total_num_tokens, prefill_pp_size=self.server_args.disaggregation_prefill_pp, num_reserved_decode_tokens=self.server_args.num_reserved_decode_tokens, transfer_backend=self.transfer_backend, ) elif self.disaggregation_mode == DisaggregationMode.PREFILL: # *2 for the headroom. buffer_size = self.max_running_requests * 2 self.req_to_metadata_buffer_idx_allocator = ReqToMetadataIdxAllocator( buffer_size ) self.disagg_metadata_buffers = MetadataBuffers( buffer_size, hidden_size=self.model_config.hf_text_config.hidden_size, dtype=self.model_config.dtype, custom_mem_pool=self.token_to_kv_pool_allocator.get_kvcache().maybe_get_custom_mem_pool(), ) self.disagg_prefill_bootstrap_queue = PrefillBootstrapQueue( token_to_kv_pool=self.token_to_kv_pool_allocator.get_kvcache(), draft_token_to_kv_pool=( None if self.draft_worker is None else self.draft_worker.model_runner.token_to_kv_pool ), req_to_metadata_buffer_idx_allocator=self.req_to_metadata_buffer_idx_allocator, metadata_buffers=self.disagg_metadata_buffers, tp_rank=self.tp_rank, tp_size=self.tp_size, gpu_id=self.gpu_id, bootstrap_port=self.server_args.disaggregation_bootstrap_port, gloo_group=self.attn_tp_cpu_group, max_total_num_tokens=self.max_total_num_tokens, decode_tp_size=self.server_args.disaggregation_decode_tp, decode_dp_size=self.server_args.disaggregation_decode_dp, scheduler=self, pp_rank=self.pp_rank, pp_size=self.pp_size, transfer_backend=self.transfer_backend, ) # The prefill requests that are in the middle of kv sending self.disagg_prefill_inflight_queue: List[Req] = [] @DynamicGradMode() def event_loop_normal(self): """A normal scheduler loop.""" while True: recv_reqs = self.recv_requests() self.process_input_requests(recv_reqs) batch = self.get_next_batch_to_run() self.cur_batch = batch if batch: result = self.run_batch(batch) self.process_batch_result(batch, result) else: # When the server is idle, do self-check and re-init some states self.self_check_during_idle() self.last_batch = batch @DynamicGradMode() def event_loop_overlap(self): """A scheduler loop that overlaps the CPU 
processing and GPU computation.""" self.result_queue = deque() while True: recv_reqs = self.recv_requests() self.process_input_requests(recv_reqs) batch = self.get_next_batch_to_run() self.cur_batch = batch if batch: batch.launch_done = threading.Event() result = self.run_batch(batch) self.result_queue.append((batch.copy(), result)) if self.last_batch is None: # Create a dummy first batch to start the pipeline for overlap schedule. # It is now used for triggering the sampling_info_done event. tmp_batch = ScheduleBatch( reqs=None, forward_mode=ForwardMode.DUMMY_FIRST, next_batch_sampling_info=self.tp_worker.cur_sampling_info, ) self.process_batch_result(tmp_batch, None, batch.launch_done) if self.last_batch: # Process the results of the last batch tmp_batch, tmp_result = self.result_queue.popleft() tmp_batch.next_batch_sampling_info = ( self.tp_worker.cur_sampling_info if batch else None ) # NOTE: we should use current launched batch's launch_done event Instead of the last batch's self.process_batch_result( tmp_batch, tmp_result, batch.launch_done if batch else None ) elif batch is None: # When the server is idle, do self-check and re-init some states self.self_check_during_idle() self.last_batch = batch @DynamicGradMode() def event_loop_pp(self): """A non-overlap scheduler loop for pipeline parallelism.""" mbs = [None] * self.pp_size last_mbs = [None] * self.pp_size self.running_mbs = [ ScheduleBatch(reqs=[], batch_is_full=False) for _ in range(self.pp_size) ] bids = [None] * self.pp_size pp_outputs: Optional[PPProxyTensors] = None while True: server_is_idle = True for mb_id in range(self.pp_size): self.running_batch = self.running_mbs[mb_id] self.last_batch = last_mbs[mb_id] recv_reqs = self.recv_requests() self.process_input_requests(recv_reqs) mbs[mb_id] = self.get_next_batch_to_run() self.running_mbs[mb_id] = self.running_batch self.cur_batch = mbs[mb_id] if self.cur_batch: server_is_idle = False result = self.run_batch(self.cur_batch) # (last rank) send the outputs to the next step if self.pp_group.is_last_rank: if self.cur_batch: next_token_ids, bids[mb_id] = ( result.next_token_ids, result.bid, ) if self.cur_batch.return_logprob: pp_outputs = PPProxyTensors( { "next_token_ids": next_token_ids, "extend_input_len_per_req": result.extend_input_len_per_req, "extend_logprob_start_len_per_req": result.extend_logprob_start_len_per_req, } | ( { f"logits_output.{k}": v for k, v in result.logits_output.__dict__.items() } if result.logits_output is not None else {} ) ) else: pp_outputs = PPProxyTensors( { "next_token_ids": next_token_ids, } ) # send the output from the last round to let the next stage worker run post processing self.pp_group.send_tensor_dict( pp_outputs.tensors, all_gather_group=self.attn_tp_group, ) # receive outputs and post-process (filter finished reqs) the coming microbatch next_mb_id = (mb_id + 1) % self.pp_size next_pp_outputs = None if mbs[next_mb_id] is not None: next_pp_outputs: Optional[PPProxyTensors] = PPProxyTensors( self.pp_group.recv_tensor_dict( all_gather_group=self.attn_tp_group ) ) mbs[next_mb_id].output_ids = next_pp_outputs["next_token_ids"] logits_output_args = { k[len("logits_output.") :]: v for k, v in next_pp_outputs.tensors.items() if k.startswith("logits_output.") } if len(logits_output_args) > 0: logits_output = LogitsProcessorOutput(**logits_output_args) else: logits_output = None output_result = GenerationBatchResult( logits_output=logits_output, pp_hidden_states_proxy_tensors=None, next_token_ids=next_pp_outputs["next_token_ids"], 
extend_input_len_per_req=next_pp_outputs.tensors.get( "extend_input_len_per_req", None ), extend_logprob_start_len_per_req=next_pp_outputs.tensors.get( "extend_logprob_start_len_per_req", None ), bid=bids[next_mb_id], can_run_cuda_graph=result.can_run_cuda_graph, ) self.process_batch_result(mbs[next_mb_id], output_result) last_mbs[next_mb_id] = mbs[next_mb_id] # (not last rank) if not self.pp_group.is_last_rank: if self.cur_batch: bids[mb_id] = result.bid # carry the outputs to the next stage # send the outputs from the last round to let the next stage worker run post processing if pp_outputs: self.pp_group.send_tensor_dict( pp_outputs.tensors, all_gather_group=self.attn_tp_group, ) # send out reqs to the next stage dp_offset = self.attn_dp_rank * self.attn_tp_size if self.attn_tp_rank == 0: point_to_point_pyobj( recv_reqs, self.pp_rank * self.tp_size + dp_offset, self.world_group.device_group, self.pp_rank * self.tp_size + dp_offset, (self.pp_rank + 1) * self.tp_size + dp_offset, ) # send out proxy tensors to the next stage if self.cur_batch: self.pp_group.send_tensor_dict( result.pp_hidden_states_proxy_tensors, all_gather_group=self.attn_tp_group, ) pp_outputs = next_pp_outputs # When the server is idle, self-check and re-init some states if server_is_idle: # When the server is idle, do self-check and re-init some states self.self_check_during_idle() def recv_requests(self) -> List[Req]: """Receive results at tp_rank = 0 and broadcast it to all other TP ranks.""" if self.recv_skipper is not None: last_forward_mode = ( self.last_batch.forward_mode if self.last_batch is not None else None ) if not self.recv_skipper.handle(last_forward_mode): return [] if self.pp_rank == 0: if self.attn_tp_rank == 0: recv_reqs = [] while True: try: recv_req = self.recv_from_tokenizer.recv_pyobj(zmq.NOBLOCK) except zmq.ZMQError: break recv_reqs.append(recv_req) while True: try: recv_rpc = self.recv_from_rpc.recv_pyobj(zmq.NOBLOCK) except zmq.ZMQError: break recv_reqs.append(recv_rpc) else: recv_reqs = None else: if self.attn_tp_rank == 0: dp_offset = self.attn_dp_rank * self.attn_tp_size recv_reqs = point_to_point_pyobj( [], self.pp_rank * self.tp_size + dp_offset, self.world_group.device_group, (self.pp_rank - 1) * self.tp_size + dp_offset, self.pp_rank * self.tp_size + dp_offset, ) else: recv_reqs = None if self.input_blocker is not None: recv_reqs = self.input_blocker.handle(recv_reqs) if self.server_args.enable_dp_attention: if self.attn_tp_rank == 0: work_reqs = [ req for req in recv_reqs if isinstance( req, (TokenizedGenerateReqInput, TokenizedEmbeddingReqInput) ) ] control_reqs = [ req for req in recv_reqs if not isinstance( req, (TokenizedGenerateReqInput, TokenizedEmbeddingReqInput) ) ] else: work_reqs = None control_reqs = None if self.attn_tp_size != 1: work_reqs = broadcast_pyobj( work_reqs, self.attn_tp_group.rank, self.attn_tp_cpu_group, src=self.attn_tp_group.ranks[0], ) if self.tp_size != 1: control_reqs = broadcast_pyobj( control_reqs, self.tp_group.rank, self.tp_cpu_group, src=self.tp_group.ranks[0], ) recv_reqs = work_reqs + control_reqs elif self.tp_size != 1: recv_reqs = broadcast_pyobj( recv_reqs, self.tp_group.rank, self.tp_cpu_group, src=self.tp_group.ranks[0], ) return recv_reqs def process_input_requests(self, recv_reqs: List): for recv_req in recv_reqs: # If it is a health check generation request and there are running requests, ignore it. 
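            # The counter incremented below is drained by maybe_send_health_check_signal,
            # which sends the deferred health-check reply once a batch result is processed.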
if is_health_check_generate_req(recv_req) and ( self.chunked_req is not None or not self.running_batch.is_empty() ): self.return_health_check_ct += 1 continue # If it is a work request, accept or reject the request based on the request queue size. if is_work_request(recv_req): if len(self.waiting_queue) + 1 > self.max_queued_requests: abort_req = AbortReq( recv_req.rid, finished_reason={ "type": "abort", "status_code": HTTPStatus.SERVICE_UNAVAILABLE, "message": "The request queue is full.", }, ) self.send_to_tokenizer.send_pyobj(abort_req) continue output = self._request_dispatcher(recv_req) if output is not None: if isinstance(output, RpcReqOutput): if self.recv_from_rpc is not None: self.recv_from_rpc.send_pyobj(output) else: self.send_to_tokenizer.send_pyobj(output) def handle_generate_request( self, recv_req: TokenizedGenerateReqInput, ): if ( self.server_args.enable_dp_attention and self.server_args.load_balance_method == "minimum_tokens" ): self.recv_dp_balance_id_this_term.append(recv_req.dp_balance_id) # Create a new request if ( recv_req.session_params is None or recv_req.session_params.id is None or recv_req.session_params.id not in self.sessions ): if recv_req.input_embeds is not None: # Generate fake input_ids based on the length of input_embeds seq_length = len(recv_req.input_embeds) fake_input_ids = [1] * seq_length recv_req.input_ids = fake_input_ids if recv_req.bootstrap_port is None: # Use default bootstrap port recv_req.bootstrap_port = self.server_args.disaggregation_bootstrap_port req = Req( recv_req.rid, recv_req.input_text, recv_req.input_ids, recv_req.sampling_params, return_logprob=recv_req.return_logprob, top_logprobs_num=recv_req.top_logprobs_num, token_ids_logprob=recv_req.token_ids_logprob, stream=recv_req.stream, lora_id=recv_req.lora_id, input_embeds=recv_req.input_embeds, custom_logit_processor=recv_req.custom_logit_processor, return_hidden_states=recv_req.return_hidden_states, eos_token_ids=self.model_config.hf_eos_token_id, bootstrap_host=recv_req.bootstrap_host, bootstrap_port=recv_req.bootstrap_port, bootstrap_room=recv_req.bootstrap_room, data_parallel_rank=recv_req.data_parallel_rank, vocab_size=self.model_config.vocab_size, ) req.tokenizer = self.tokenizer if self.disaggregation_mode != DisaggregationMode.NULL: # Invalid request for disaggregated mode if recv_req.bootstrap_room is None: error_msg = ( f"Invalid request: Disaggregated request received without " f"boostrap room id. {req.rid=}" ) logger.error(error_msg) prepare_abort(req, error_msg) self.stream_output([req], req.return_logprob) return if ( recv_req.session_params is not None and recv_req.session_params.id is not None ): req.set_finish_with_abort( f"Invalid request: session id {recv_req.session_params.id} does not exist" ) self._add_request_to_queue(req) return else: # Create a new request from a previous session session = self.sessions[recv_req.session_params.id] req = session.create_req(recv_req, self.tokenizer) if isinstance(req.finished_reason, FINISH_ABORT): self._add_request_to_queue(req) return # Handle multimodal inputs if recv_req.mm_inputs is not None: image_inputs = MultimodalInputs.from_dict(recv_req.mm_inputs) # Expand a single image token into multiple dummy tokens for receiving image embeddings req.origin_input_ids = self.pad_input_ids_func( req.origin_input_ids, image_inputs ) req.extend_image_inputs(image_inputs) if len(req.origin_input_ids) >= self.max_req_input_len: req.set_finish_with_abort( error_msg=( "Multimodal prompt is too long after expanding multimodal tokens. 
" f"After expanding {len(req.origin_input_ids_unpadded)=} => {len(req.origin_input_ids)} >= {self.max_req_input_len}." ) ) self._add_request_to_queue(req) return # Validate prompt length error_msg = validate_input_length( req, self.max_req_input_len, self.server_args.allow_auto_truncate, ) if error_msg: req.set_finish_with_abort(error_msg) self._add_request_to_queue(req) return # Copy more attributes if recv_req.logprob_start_len == -1 or not recv_req.return_logprob: # By default, only return the logprobs for output tokens req.logprob_start_len = len(req.origin_input_ids) - 1 else: req.logprob_start_len = recv_req.logprob_start_len if req.logprob_start_len >= len(req.origin_input_ids): error_msg = f"{req.logprob_start_len=} is higher than the number of input tokens {len(req.origin_input_ids)=}. Please use a smaller logprob_start_len." req.logprob_start_len = len(req.origin_input_ids) - 1 req.set_finish_with_abort(error_msg) self._add_request_to_queue(req) return req.sampling_params.max_new_tokens = min( ( req.sampling_params.max_new_tokens if req.sampling_params.max_new_tokens is not None else 1 << 30 ), self.max_req_len - len(req.origin_input_ids) - 1, ) # Init grammar cache for this request add_to_grammar_queue = False if ( req.sampling_params.json_schema is not None or req.sampling_params.regex is not None or req.sampling_params.ebnf is not None or req.sampling_params.structural_tag is not None ): assert self.grammar_backend is not None if req.sampling_params.json_schema is not None: key = ("json", req.sampling_params.json_schema) elif req.sampling_params.regex is not None: key = ("regex", req.sampling_params.regex) elif req.sampling_params.ebnf is not None: key = ("ebnf", req.sampling_params.ebnf) elif req.sampling_params.structural_tag: key = ("structural_tag", req.sampling_params.structural_tag) value, cache_hit = self.grammar_backend.get_cached_or_future_value(key) req.grammar = value if not cache_hit: req.grammar_key = key add_to_grammar_queue = True else: if value is INVALID_GRAMMAR_OBJ: # We hit a cached invalid grammar. 
error_msg = f"Invalid grammar request with cache hit: {key=}" req.set_finish_with_abort(error_msg) if add_to_grammar_queue: req.queue_time_start = time.perf_counter() self.grammar_queue.append(req) else: self._add_request_to_queue(req) def _add_request_to_queue(self, req: Req): req.queue_time_start = time.perf_counter() if self.disaggregation_mode == DisaggregationMode.PREFILL: self._prefetch_kvcache(req) self.disagg_prefill_bootstrap_queue.add( req, self.model_config.num_key_value_heads ) elif self.disaggregation_mode == DisaggregationMode.DECODE: self.disagg_decode_prealloc_queue.add(req) else: self._prefetch_kvcache(req) self.waiting_queue.append(req) def _prefetch_kvcache(self, req: Req): if self.enable_hicache_storage: req.init_next_round_input(self.tree_cache) last_hash = req.last_host_node.get_last_hash_value() matched_len = len(req.prefix_indices) + req.host_hit_length # todo, free-form fetching, calculating hash keys on the fly if (matched_len > 0 and last_hash is not None) or matched_len == 0: new_input_tokens = req.fill_ids[matched_len:] self.tree_cache.prefetch_from_storage( req.rid, req.last_host_node, new_input_tokens, last_hash ) def _extend_requests_to_queue(self, reqs: List[Req], is_retracted: bool = False): if self.disaggregation_mode == DisaggregationMode.PREFILL: self.disagg_prefill_bootstrap_queue.extend( reqs, self.model_config.num_key_value_heads ) elif self.disaggregation_mode == DisaggregationMode.DECODE: # If this is a decode server, we put the request to the decode pending prealloc queue self.disagg_decode_prealloc_queue.extend(reqs, is_retracted) else: self.waiting_queue.extend(reqs) def handle_embedding_request( self, recv_req: TokenizedEmbeddingReqInput, ): req = Req( recv_req.rid, recv_req.input_text, recv_req.input_ids, recv_req.sampling_params, token_type_ids=recv_req.token_type_ids, ) req.tokenizer = self.tokenizer # Handle multimodal inputs if recv_req.image_inputs is not None: image_inputs = MultimodalInputs.from_dict(recv_req.image_inputs) # Expand a single image token into multiple dummy tokens for receiving image embeddings req.origin_input_ids = self.pad_input_ids_func( req.origin_input_ids, image_inputs ) req.extend_image_inputs(image_inputs) if len(req.origin_input_ids) >= self.max_req_input_len: req.set_finish_with_abort( error_msg=( "Multimodal prompt is too long after expanding multimodal tokens. " f"After expanding {len(req.origin_input_ids_unpadded)=} => {len(req.origin_input_ids)} >= {self.max_req_input_len}." 
) ) self._add_request_to_queue(req) return # Validate prompts length error_msg = validate_input_length( req, self.max_req_input_len, self.server_args.allow_auto_truncate, ) if error_msg: self._add_request_to_queue(req) return # Copy more attributes req.logprob_start_len = len(req.origin_input_ids) - 1 self._add_request_to_queue(req) def self_check_during_idle(self): self.check_memory() self.check_tree_cache() self.new_token_ratio = self.init_new_token_ratio self.maybe_sleep_on_idle() def check_memory(self): if self.is_hybrid: ( full_num_used, swa_num_used, _, _, full_available_size, full_evictable_size, swa_available_size, swa_evictable_size, ) = self._get_swa_token_info() memory_leak = full_num_used != 0 or swa_num_used != 0 token_msg = ( f"{self.full_tokens_per_layer=}, {full_available_size=}, {full_evictable_size=}, {self.tree_cache.full_protected_size()=}\n" f"{self.swa_tokens_per_layer=}, {swa_available_size=}, {swa_evictable_size=}, {self.tree_cache.swa_protected_size()=}\n" ) else: _, _, available_size, evictable_size = self._get_token_info() protected_size = self.tree_cache.protected_size() memory_leak = (available_size + evictable_size) != ( self.max_total_num_tokens if not self.enable_hierarchical_cache else self.max_total_num_tokens - protected_size ) token_msg = f"{self.max_total_num_tokens=}, {available_size=}, {evictable_size=}, {protected_size=}\n" if memory_leak: msg = "token_to_kv_pool_allocator memory leak detected! " f"{token_msg}" raise ValueError(msg) if self.disaggregation_mode == DisaggregationMode.DECODE: req_total_size = ( self.req_to_token_pool.size + self.req_to_token_pool.pre_alloc_size ) else: req_total_size = self.req_to_token_pool.size if len(self.req_to_token_pool.free_slots) != req_total_size: msg = ( "req_to_token_pool memory leak detected!" f"available_size={len(self.req_to_token_pool.free_slots)}, " f"total_size={self.req_to_token_pool.size}\n" ) raise ValueError(msg) if ( self.enable_metrics and self.current_scheduler_metrics_enabled() and time.perf_counter() > self.metrics_collector.last_log_time + 30 ): # During idle time, also collect metrics every 30 seconds. 
if self.is_hybrid: ( full_num_used, swa_num_used, full_token_usage, swa_token_usage, _, _, _, _, ) = self._get_swa_token_info() num_used = max(full_num_used, swa_num_used) token_usage = max(full_token_usage, swa_token_usage) else: num_used, token_usage, _, _ = self._get_token_info() num_running_reqs = len(self.running_batch.reqs) self.stats.num_running_reqs = num_running_reqs self.stats.num_used_tokens = num_used self.stats.token_usage = round(token_usage, 2) self.stats.gen_throughput = 0 self.stats.num_queue_reqs = len(self.waiting_queue) self.stats.num_grammar_queue_reqs = len(self.grammar_queue) self.metrics_collector.log_stats(self.stats) self._publish_kv_events() def check_tree_cache(self): if self.is_hybrid and isinstance(self.tree_cache, SWARadixCache): self.tree_cache.sanity_check() def _get_token_info(self): available_size = self.token_to_kv_pool_allocator.available_size() evictable_size = self.tree_cache.evictable_size() num_used = self.max_total_num_tokens - (available_size + evictable_size) token_usage = num_used / self.max_total_num_tokens return num_used, token_usage, available_size, evictable_size def _get_swa_token_info(self): full_available_size = self.token_to_kv_pool_allocator.full_available_size() full_evictable_size = self.tree_cache.full_evictable_size() swa_available_size = self.token_to_kv_pool_allocator.swa_available_size() swa_evictable_size = self.tree_cache.swa_evictable_size() full_num_used = self.full_tokens_per_layer - ( full_available_size + full_evictable_size ) swa_num_used = self.swa_tokens_per_layer - ( swa_available_size + swa_evictable_size ) full_token_usage = full_num_used / self.full_tokens_per_layer swa_token_usage = swa_num_used / self.swa_tokens_per_layer return ( full_num_used, swa_num_used, full_token_usage, swa_token_usage, full_available_size, full_evictable_size, swa_available_size, swa_evictable_size, ) def get_next_batch_to_run(self) -> Optional[ScheduleBatch]: # Merge the prefill batch into the running batch chunked_req_to_exclude = set() if self.chunked_req: # Move the chunked request out of the batch so that we can merge # only finished requests to running_batch. chunked_req_to_exclude.add(self.chunked_req) self.tree_cache.cache_unfinished_req(self.chunked_req) # chunked request keeps its rid but will get a new req_pool_idx self.req_to_token_pool.free(self.chunked_req.req_pool_idx) if self.last_batch and self.last_batch.forward_mode.is_extend(): if self.last_batch.chunked_req is not None: # In the context pipeline parallelism, after the last chunk, the current microbatch still track outdated chunked_req. # We need to discard it. chunked_req_to_exclude.add(self.last_batch.chunked_req) # Filter batch last_bs = self.last_batch.batch_size() self.last_batch.filter_batch( chunked_req_to_exclude=list(chunked_req_to_exclude) ) if self.last_batch.batch_size() < last_bs: self.running_batch.batch_is_full = False # Merge the new batch into the running batch if not self.last_batch.is_empty(): if self.running_batch.is_empty(): self.running_batch = self.last_batch else: # Merge running_batch with prefill batch self.running_batch.merge_batch(self.last_batch) new_batch = self.get_new_batch_prefill() need_dp_attn_preparation = require_mlp_sync(self.server_args) if need_dp_attn_preparation and not self.spec_algorithm.is_none(): # In speculative decoding, prefill batches and decode batches cannot be processed in the same DP attention group. # We prepare idle batches in advance to skip preparing decode batches when there are prefill batches in the group. 
            new_batch = self.prepare_mlp_sync_batch(new_batch)
            need_dp_attn_preparation = new_batch is None

        if new_batch is not None:
            # Run prefill first if possible
            ret = new_batch
        else:
            # Run decode
            if not self.running_batch.is_empty():
                self.running_batch = self.update_running_batch(self.running_batch)
                ret = self.running_batch if not self.running_batch.is_empty() else None
            else:
                ret = None

        # Handle DP attention
        if need_dp_attn_preparation:
            if (
                self.server_args.load_balance_method == "minimum_tokens"
                and self.forward_ct % 40 == 0
            ):
                self.handle_dp_balance_data(ret)
            ret = self.prepare_mlp_sync_batch(ret)

        return ret

    def get_num_allocatable_reqs(self, running_bs):
        res = global_server_args_dict["max_micro_batch_size"] - running_bs
        if self.pp_size > 1:
            res = min(res, self.req_to_token_pool.available_size())
        return res

    def get_new_batch_prefill(self) -> Optional[ScheduleBatch]:
        # Check if the grammar is ready in the grammar queue
        if self.grammar_queue:
            self.move_ready_grammar_requests()

        # Handle the cases where prefill is not allowed
        if (
            self.running_batch.batch_is_full or len(self.waiting_queue) == 0
        ) and self.chunked_req is None:
            return None

        running_bs = len(self.running_batch.reqs)
        # Ignore the check if self.chunked_req is not None.
        # In the non-PP case, when self.chunked_req is not None, num_allocatable_reqs
        # should always be greater than 0, as the space for the chunked request has just
        # been released.
        # In the PP case, a chunked req can start in one microbatch and end in another,
        # so max_running_requests per microbatch should not be strict. Instead, we should
        # always allow the chunked request to be added; otherwise there will be a memory leak.
        if self.get_num_allocatable_reqs(running_bs) <= 0 and not self.chunked_req:
            self.running_batch.batch_is_full = True
            return None

        if self.enable_hierarchical_cache:
            self.tree_cache.check_hicache_events()

        # Get priority queue
        self.policy.calc_priority(self.waiting_queue)

        # Prefill policy
        adder = PrefillAdder(
            self.page_size,
            self.tree_cache,
            self.token_to_kv_pool_allocator,
            self.running_batch,
            self.new_token_ratio,
            self.max_prefill_tokens,
            self.chunked_prefill_size,
            running_bs if self.is_mixed_chunk else 0,
        )

        if self.chunked_req is not None:
            self.chunked_req.init_next_round_input()
            self.chunked_req = adder.add_chunked_req(self.chunked_req)

        if self.enable_lora:
            lora_set = set([req.lora_id for req in self.running_batch.reqs])

        # Get requests from the waiting queue to a new prefill batch
        for req in self.waiting_queue:
            if self.enable_lora and not self.tp_worker.can_run_lora_batch(
                lora_set
                | set([req.lora_id for req in adder.can_run_list])
                | set([req.lora_id])
            ):
                self.running_batch.batch_is_full = True
                break

            if len(adder.can_run_list) >= self.get_num_allocatable_reqs(running_bs):
                self.running_batch.batch_is_full = True
                break

            if self.disaggregation_mode == DisaggregationMode.PREFILL:
                # In prefill mode, the prealloc queue and transfer queue can also take
                # memory, so we need to check the actual available size.
if len(adder.can_run_list) >= self.req_to_token_pool.available_size(): self.running_batch.batch_is_full = True break if self.enable_hicache_storage: prefetch_done = self.tree_cache.check_prefetch_progress(req.rid) if not prefetch_done: # skip staging requests that are ongoing prefetch continue req.init_next_round_input(self.tree_cache) res = adder.add_one_req(req, has_chunked_req=(self.chunked_req is not None)) if res != AddReqResult.CONTINUE: if res == AddReqResult.NO_TOKEN: if self.enable_hierarchical_cache: # Set batch_is_full after making sure there are requests that can be served self.running_batch.batch_is_full = len( adder.can_run_list ) > 0 or (not self.running_batch.is_empty()) else: self.running_batch.batch_is_full = True break # Update waiting queue can_run_list: List[Req] = adder.can_run_list if len(can_run_list) == 0: return None if self.enable_metrics: # only record queue time when enable_metrics is True to avoid overhead for req in can_run_list: req.queue_time_end = time.perf_counter() self.waiting_queue = [ x for x in self.waiting_queue if x not in set(can_run_list) ] if adder.new_chunked_req is not None: assert self.chunked_req is None self.chunked_req = adder.new_chunked_req if self.chunked_req: self.chunked_req.is_chunked += 1 # Print stats if self.current_scheduler_metrics_enabled(): self.log_prefill_stats(adder, can_run_list, running_bs) # Create a new batch new_batch = ScheduleBatch.init_new( can_run_list, self.req_to_token_pool, self.token_to_kv_pool_allocator, self.tree_cache, self.model_config, self.enable_overlap, self.spec_algorithm, self.server_args.enable_custom_logit_processor, chunked_req=self.chunked_req, ) if self.enable_hierarchical_cache: # todo (zhiqiang): disable cuda graph execution if hicache loading triggered new_batch.hicache_consumer_index = ( self.tree_cache.ready_to_load_host_cache() ) new_batch.prepare_for_extend() # Mixed-style chunked prefill if ( self.is_mixed_chunk and not self.running_batch.is_empty() and not (new_batch.return_logprob or self.running_batch.return_logprob) ): # TODO (lianmin): support return_logprob + mixed chunked prefill self.running_batch.filter_batch() if not self.running_batch.is_empty(): self.running_batch.prepare_for_decode() new_batch.mix_with_running(self.running_batch) new_batch.decoding_reqs = self.running_batch.reqs self.running_batch = ScheduleBatch( reqs=[], batch_is_full=self.running_batch.batch_is_full ) else: new_batch.decoding_reqs = None return new_batch def update_running_batch(self, batch: ScheduleBatch) -> Optional[ScheduleBatch]: """Update the current running decoding batch.""" initial_bs = batch.batch_size() batch.filter_batch() if batch.is_empty(): batch.batch_is_full = False return batch # Check if decode out of memory if not batch.check_decode_mem(self.decode_mem_cache_buf_multiplier) or ( TEST_RETRACT and batch.batch_size() > 10 ): old_ratio = self.new_token_ratio retracted_reqs, new_token_ratio = batch.retract_decode(self.server_args) num_retracted_reqs = len(retracted_reqs) self.new_token_ratio = new_token_ratio logger.info( "KV cache pool is full. Retract requests. 
" f"#retracted_reqs: {num_retracted_reqs}, " f"#new_token_ratio: {old_ratio:.4f} -> {self.new_token_ratio:.4f}" ) self._extend_requests_to_queue(retracted_reqs, is_retracted=True) self.total_retracted_reqs += num_retracted_reqs else: self.new_token_ratio = max( self.new_token_ratio - self.new_token_ratio_decay, self.min_new_token_ratio, ) if batch.batch_size() < initial_bs: batch.batch_is_full = False # Update batch tensors batch.prepare_for_decode() return batch def run_batch( self, batch: ScheduleBatch ) -> Union[GenerationBatchResult, EmbeddingBatchResult]: """Run a batch.""" self.forward_ct += 1 # Whether to run the profiler self._profile_batch_predicate(batch) if self.forward_sleep_time is not None: logger.info(f"Scheduler.run_batch sleep {self.forward_sleep_time}s") time.sleep(self.forward_sleep_time) # Run forward if self.is_generation: if self.spec_algorithm.is_none(): model_worker_batch = batch.get_model_worker_batch() # update the consumer index of hicache to the running batch self.tp_worker.set_hicache_consumer( model_worker_batch.hicache_consumer_index ) if self.pp_group.is_last_rank: logits_output, next_token_ids, can_run_cuda_graph = ( self.tp_worker.forward_batch_generation(model_worker_batch) ) else: pp_hidden_states_proxy_tensors, _, can_run_cuda_graph = ( self.tp_worker.forward_batch_generation(model_worker_batch) ) bid = model_worker_batch.bid else: ( logits_output, next_token_ids, bid, num_accepted_tokens, can_run_cuda_graph, ) = self.draft_worker.forward_batch_speculative_generation(batch) bs = batch.batch_size() self.spec_num_total_accepted_tokens += num_accepted_tokens + bs self.spec_num_total_forward_ct += bs self.num_generated_tokens += num_accepted_tokens if self.pp_group.is_last_rank: batch.output_ids = next_token_ids # These 2 values are needed for processing the output, but the values can be # modified by overlap schedule. So we have to copy them here so that # we can use the correct values in output processing. 
if batch.return_logprob or self.spec_algorithm.is_eagle(): extend_input_len_per_req = [req.extend_input_len for req in batch.reqs] else: extend_input_len_per_req = None if batch.return_logprob: extend_logprob_start_len_per_req = [ req.extend_logprob_start_len for req in batch.reqs ] else: extend_logprob_start_len_per_req = None ret = GenerationBatchResult( logits_output=logits_output if self.pp_group.is_last_rank else None, pp_hidden_states_proxy_tensors=( pp_hidden_states_proxy_tensors if not self.pp_group.is_last_rank else None ), next_token_ids=next_token_ids if self.pp_group.is_last_rank else None, extend_input_len_per_req=extend_input_len_per_req, extend_logprob_start_len_per_req=extend_logprob_start_len_per_req, bid=bid, can_run_cuda_graph=can_run_cuda_graph, ) else: # embedding or reward model model_worker_batch = batch.get_model_worker_batch() embeddings = self.tp_worker.forward_batch_embedding(model_worker_batch) ret = EmbeddingBatchResult( embeddings=embeddings, bid=model_worker_batch.bid ) return ret def process_batch_result( self, batch: ScheduleBatch, result: Union[GenerationBatchResult, EmbeddingBatchResult], launch_done: Optional[threading.Event] = None, ): if batch.forward_mode.is_decode(): self.process_batch_result_decode(batch, result, launch_done) elif batch.forward_mode.is_extend(): self.process_batch_result_prefill(batch, result, launch_done) elif batch.forward_mode.is_idle(): if self.enable_overlap: self.tp_worker.resolve_last_batch_result(launch_done) self.set_next_batch_sampling_info_done(batch) elif batch.forward_mode.is_dummy_first(): self.set_next_batch_sampling_info_done(batch) self.maybe_send_health_check_signal() def maybe_send_health_check_signal(self): if self.return_health_check_ct: # Return some signal for the health check. # This is used to prevent the health check signal being blocked by long context prefill. # However, one minor issue is that this code path does not check the status of detokenizer manager. self.return_health_check_ct -= 1 self.send_to_tokenizer.send_pyobj(HealthCheckOutput()) def prepare_mlp_sync_batch(self, local_batch: ScheduleBatch): return self.prepare_mlp_sync_batch_raw( local_batch, dp_size=self.server_args.dp_size, attn_tp_size=self.attn_tp_size, tp_group=self.tp_group, get_idle_batch=self.get_idle_batch, disable_cuda_graph=self.server_args.disable_cuda_graph, spec_algorithm=self.spec_algorithm, speculative_num_draft_tokens=self.server_args.speculative_num_draft_tokens, enable_two_batch_overlap=self.server_args.enable_two_batch_overlap, enable_deepep_moe=MoeA2ABackend( self.server_args.moe_a2a_backend ).is_deepep(), deepep_mode=DeepEPMode(self.server_args.deepep_mode), require_mlp_tp_gather=require_mlp_tp_gather(self.server_args), disable_overlap_schedule=self.server_args.disable_overlap_schedule, ) def handle_dp_balance_data(self, local_batch: ScheduleBatch): def gather_dp_balance_info(holding_tokens_list) -> Union[None, List[List[int]]]: """gather recv_dp_balance_id_this_term and holding tokens per worker for dp balance""" recv_list = self.recv_dp_balance_id_this_term assert len(recv_list) <= 511, ( "The number of requests received this round is too large. " "Please increase gather_tensor_size and onfly_info_size." ) # The maximum size of the tensor used for gathering data from all workers. 
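            # Indices 0 and 1 are header slots (holding tokens and id count), so at most
            # gather_tensor_size - 2 request ids fit into a single gather.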
gather_tensor_size = 512 # recv_tensor: | holding_tokens | len(recv_dp_balance_id) | recv_dp_balance_ids recv_tensor = torch.zeros(gather_tensor_size, dtype=torch.int32) recv_tensor[0] = holding_tokens_list recv_tensor[1] = len( recv_list ) # The first element is the length of the list. recv_tensor[2 : len(recv_list) + 2] = torch.tensor( recv_list, dtype=torch.int32 ) if self.tp_rank == 0: gathered_list = [ torch.zeros(gather_tensor_size, dtype=torch.int32) for _ in range(self.balance_meta.num_workers) ] else: gathered_list = None torch.distributed.gather( recv_tensor, gathered_list, group=self.tp_cpu_group ) gathered_id_list_per_worker = None if self.tp_rank == 0: gathered_id_list_per_worker = [] holding_tokens_list = [] for tensor in gathered_list: holding_tokens_list.append(tensor[0].item()) list_length = tensor[1].item() gathered_id_list_per_worker.append( tensor[2 : list_length + 2].tolist() ) return gathered_id_list_per_worker, holding_tokens_list def write_shared_dp_balance_info(new_recv_rid_lists, local_tokens): meta = self.balance_meta with meta.mutex: onfly_list: List[Dict[int, int]] = meta.get_shared_onfly() assert len(new_recv_rid_lists) == len( onfly_list ), "num_worker not equal" # 1.Check if the rid received by each worker this round is present in onfly. # If it is, remove the corresponding onfly item. worker_id = 0 for new_recv_rids, on_fly_reqs in zip(new_recv_rid_lists, onfly_list): for new_recv_rid in new_recv_rids: assert ( new_recv_rid in on_fly_reqs ), f"{new_recv_rid=} not in {worker_id=} {on_fly_reqs=}, data consistency is wrong" del on_fly_reqs[new_recv_rid] worker_id += 1 # 2. Atomically write local_tokens and onfly into shm under the mutex meta.set_shared_onfly_info(onfly_list) meta.set_shared_local_tokens(local_tokens) holding_tokens = self.get_load() new_recv_dp_balance_id_list, holding_token_list = gather_dp_balance_info( holding_tokens ) self.recv_dp_balance_id_this_term.clear() if self.tp_rank == 0: # only first worker write info write_shared_dp_balance_info( new_recv_dp_balance_id_list, holding_token_list ) @staticmethod def prepare_mlp_sync_batch_raw( local_batch: ScheduleBatch, dp_size, attn_tp_size: int, tp_group, get_idle_batch, disable_cuda_graph: bool, spec_algorithm, speculative_num_draft_tokens, enable_two_batch_overlap: bool, enable_deepep_moe: bool, deepep_mode: DeepEPMode, require_mlp_tp_gather: bool, disable_overlap_schedule: bool, ): # Check if other DP workers have running batches if local_batch is None: num_tokens = 0 num_tokens_for_logprob = 0 elif local_batch.forward_mode.is_decode(): num_tokens = local_batch.batch_size() num_tokens_for_logprob = num_tokens else: num_tokens = local_batch.extend_num_tokens num_tokens_for_logprob = sum( [ # We should have at least 1 token for sample in every case. 
                    max(extend_len - logprob_start_len, 1)
                    for logprob_start_len, extend_len in zip(
                        local_batch.extend_logprob_start_lens, local_batch.extend_lens
                    )
                ]
            )

        if local_batch is None or local_batch.forward_mode.is_decode_or_idle():
            can_cuda_graph = 1
        else:
            can_cuda_graph = 0

        is_extend_in_batch = (
            local_batch.forward_mode.is_extend() if local_batch else False
        )

        tbo_preparer = TboDPAttentionPreparer()
        if disable_overlap_schedule:
            group = tp_group.device_group
            device = tp_group.device
        else:
            group = tp_group.cpu_group
            device = "cpu"

        local_info = torch.tensor(
            [
                num_tokens,
                can_cuda_graph,
                num_tokens_for_logprob,
                is_extend_in_batch,
                *tbo_preparer.prepare_all_gather(
                    local_batch,
                    deepep_mode,
                    enable_deepep_moe,
                    enable_two_batch_overlap,
                ),
            ],
            dtype=torch.int64,
            device=device,
        )
        global_info = torch.empty(
            (dp_size, attn_tp_size, 6),
            dtype=torch.int64,
            device=device,
        )
        torch.distributed.all_gather_into_tensor(
            global_info.flatten(),
            local_info,
            group=group,
        )
        global_num_tokens = global_info[:, 0, 0].tolist()
        can_cuda_graph = min(global_info[:, 0, 1].tolist())
        global_num_tokens_for_logprob = global_info[:, 0, 2].tolist()
        is_extend_in_batch = global_info[:, 0, 3].tolist()

        tbo_split_seq_index, global_forward_mode = tbo_preparer.compute_output(
            global_info[:, :, 4:6]
        )

        if local_batch is None and max(global_num_tokens) > 0:
            local_batch = get_idle_batch()

        if local_batch is not None:
            # TODO: handle the case when moe_dense_tp_size != 1
            if not require_mlp_tp_gather:
                local_batch.global_num_tokens = [num_tokens]
                local_batch.global_num_tokens_for_logprob = [num_tokens_for_logprob]
            else:
                local_batch.global_num_tokens = global_num_tokens
                local_batch.global_num_tokens_for_logprob = (
                    global_num_tokens_for_logprob
                )
            local_batch.is_extend_in_batch = any(is_extend_in_batch)
            local_batch.tbo_split_seq_index = tbo_split_seq_index
            local_batch.global_forward_mode = global_forward_mode

            # Check the forward mode for cuda graph
            if not disable_cuda_graph:
                local_batch.can_run_dp_cuda_graph = can_cuda_graph

        return local_batch

    def get_idle_batch(self):
        idle_batch = ScheduleBatch.init_new(
            [],
            self.req_to_token_pool,
            self.token_to_kv_pool_allocator,
            self.tree_cache,
            self.model_config,
            self.enable_overlap,
            self.spec_algorithm,
            self.server_args.enable_custom_logit_processor,
        )
        idle_batch.prepare_for_idle()
        return idle_batch

    def move_ready_grammar_requests(self):
        """Move requests whose grammar objects are ready from grammar_queue to waiting_queue."""
        num_ready_reqs = 0
        num_timeout_reqs = 0
        for req in self.grammar_queue:
            try:
                if req.finished():  # It is aborted by AbortReq
                    num_ready_reqs += 1
                    continue

                req.grammar = req.grammar.result(timeout=0.03)
                self.grammar_backend.set_cache(req.grammar_key, req.grammar.copy())
                if req.grammar is INVALID_GRAMMAR_OBJ:
                    req.set_finish_with_abort(
                        f"Invalid grammar request: {req.grammar_key=}"
                    )
                num_ready_reqs += 1
            except futures._base.TimeoutError:
                req.grammar_wait_ct += 1
                # NOTE(lianmin): this timeout is the waiting time of the line above; it is
                # not the waiting time since the request entered the grammar queue.
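                # Worked example: with the module-level default GRAMMAR_TIMEOUT of
                # 300 s (overridable via SGLANG_GRAMMAR_TIMEOUT) and the 0.03 s poll
                # above, a request is declared timed out after roughly
                # 300 / 0.03 = 10000 unsuccessful polls.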
                if req.grammar_wait_ct > GRAMMAR_TIMEOUT / 0.03:
                    num_timeout_reqs = 1
                break

        if self.server_args.enable_dp_attention:
            tp_size = self.attn_tp_size
            tp_group = self.attn_tp_cpu_group
        else:
            tp_size = self.tp_size
            tp_group = self.tp_cpu_group

        if tp_size > 1:
            # Sync across TP ranks to make sure they have the same number of ready requests
            tensor = torch.tensor([num_ready_reqs, num_timeout_reqs], dtype=torch.int32)
            torch.distributed.all_reduce(
                tensor, op=torch.distributed.ReduceOp.MAX, group=tp_group
            )
            num_ready_reqs_max, num_timeout_reqs_max = tensor.tolist()

            for i in range(num_ready_reqs, num_ready_reqs_max):
                req = self.grammar_queue[i]
                if req.finished():  # It is aborted by AbortReq
                    continue
                req.grammar = req.grammar.result()
                self.grammar_backend.set_cache(req.grammar_key, req.grammar.copy())
                if req.grammar is INVALID_GRAMMAR_OBJ:
                    req.set_finish_with_abort(
                        f"Invalid grammar request: {req.grammar_key=}"
                    )
        else:
            num_ready_reqs_max = num_ready_reqs
            num_timeout_reqs_max = num_timeout_reqs

        for i in range(num_ready_reqs, num_ready_reqs + num_timeout_reqs_max):
            req = self.grammar_queue[i]
            req.grammar.cancel()
            error_msg = f"Grammar preprocessing timed out for {req.grammar_key=}"
            req.set_finish_with_abort(error_msg)
            self.grammar_backend.set_cache(req.grammar_key, INVALID_GRAMMAR_OBJ)
        num_ready_reqs = num_ready_reqs_max + num_timeout_reqs_max

        self._extend_requests_to_queue(self.grammar_queue[:num_ready_reqs])
        self.grammar_queue = self.grammar_queue[num_ready_reqs:]

    def set_next_batch_sampling_info_done(self, batch: ScheduleBatch):
        if batch.next_batch_sampling_info:
            if batch.next_batch_sampling_info.grammars is not None:
                batch.next_batch_sampling_info.update_regex_vocab_mask()
                self.current_stream.synchronize()
            batch.next_batch_sampling_info.sampling_info_done.set()

    def watchdog_thread(self):
        """A watchdog thread that kills the server itself if one forward batch takes too long."""
        self.watchdog_last_forward_ct = 0
        self.watchdog_last_time = time.perf_counter()

        while True:
            current = time.perf_counter()
            if self.cur_batch is not None:
                if self.watchdog_last_forward_ct == self.forward_ct:
                    if current > self.watchdog_last_time + self.watchdog_timeout:
                        break
                else:
                    self.watchdog_last_forward_ct = self.forward_ct
                    self.watchdog_last_time = current
            time.sleep(self.watchdog_timeout // 2)

        if not disable_request_logging():
            # Print batch size and memory pool info to check whether there are de-sync issues.
            if self.is_hybrid:
                (
                    _,
                    _,
                    _,
                    _,
                    full_available_size,
                    full_evictable_size,
                    swa_available_size,
                    swa_evictable_size,
                ) = self._get_swa_token_info()
                info_msg = (
                    f"{full_available_size=}, "
                    f"{full_evictable_size=}, "
                    f"{swa_available_size=}, "
                    f"{swa_evictable_size=}, "
                )
            else:
                _, _, available_size, evictable_size = self._get_token_info()
                info_msg = f"{available_size=}, " f"{evictable_size=}, "
            logger.error(
                f"{self.cur_batch.batch_size()=}, "
                f"{self.cur_batch.reqs=}, "
                f"{info_msg}"
            )

        pyspy_dump_schedulers()
        logger.error(f"Watchdog timeout ({self.watchdog_timeout=})")
        print(file=sys.stderr, flush=True)
        print(file=sys.stdout, flush=True)

        # Wait for some time so that the parent process can print the error.
        time.sleep(5)
        self.parent_process.send_signal(signal.SIGQUIT)

    def flush_cache_wrapped(self, recv_req: FlushCacheReqInput):
        success = self.flush_cache()
        return FlushCacheReqOutput(success=success)

    def flush_cache(self):
        """Flush the memory pool and cache."""
        if (
            len(self.waiting_queue) == 0
            and self.running_batch.is_empty()
            and (self.pp_size == 1 or all(x.is_empty() for x in self.running_mbs))
        ):
            self.cur_batch = None
            self.last_batch = None
            self.tree_cache.reset()
            if self.grammar_backend:
                self.grammar_backend.reset()
            self.req_to_token_pool.clear()
            self.token_to_kv_pool_allocator.clear()

            if not self.spec_algorithm.is_none():
                self.draft_worker.model_runner.req_to_token_pool.clear()
                self.draft_worker.model_runner.token_to_kv_pool_allocator.clear()

            self.num_generated_tokens = 0
            self.forward_ct_decode = 0
            self.spec_num_total_accepted_tokens = 0
            self.spec_num_total_forward_ct = 0
            self.cum_spec_accept_length = 0
            self.cum_spec_accept_count = 0
            torch.cuda.empty_cache()
            logger.info("Cache flushed successfully!")
            if_success = True
        else:
            logging.warning(
                f"Cache not flushed because there are pending requests. "
                f"#queue-req: {len(self.waiting_queue)}, "
                f"#running-req: {len(self.running_batch.reqs)}"
            )
            if_success = False
        return if_success

    def get_load(self):
        # TODO(lsyin): use dynamically maintained num_waiting_tokens
        if self.is_hybrid:
            load_full = (
                self.full_tokens_per_layer
                - self.token_to_kv_pool_allocator.full_available_size()
                - self.tree_cache.full_evictable_size()
            )
            load_swa = (
                self.swa_tokens_per_layer
                - self.token_to_kv_pool_allocator.swa_available_size()
                - self.tree_cache.swa_evictable_size()
            )
            load = max(load_full, load_swa)
        else:
            load = (
                self.max_total_num_tokens
                - self.token_to_kv_pool_allocator.available_size()
                - self.tree_cache.evictable_size()
            )
        load += sum(len(req.origin_input_ids) for req in self.waiting_queue)
        if self.disaggregation_mode == DisaggregationMode.PREFILL:
            load += sum(
                len(req.origin_input_ids)
                for req in self.disagg_prefill_bootstrap_queue.queue
            )
        elif self.disaggregation_mode == DisaggregationMode.DECODE:
            load += sum(
                len(req.req.origin_input_ids)
                for req in self.disagg_decode_prealloc_queue.queue
            )
        return load

    def get_internal_state(self, recv_req: GetInternalStateReq):
        ret = dict(global_server_args_dict)
        ret["last_gen_throughput"] = self.last_gen_throughput
        ret["memory_usage"] = {
            "weight": round(
                self.tp_worker.worker.model_runner.weight_load_mem_usage, 2
            ),
            "kvcache": round(
                self.token_to_kv_pool_allocator.get_kvcache().mem_usage, 2
            ),
            "token_capacity": int(self.max_total_num_tokens),
        }

        if not _is_cpu:
            ret["memory_usage"]["cuda_graph"] = round(
                self.tp_worker.worker.model_runner.cuda_graph_mem_usage, 2
            )

        if not self.spec_algorithm.is_none() and self.cum_spec_accept_count > 0:
            ret["avg_spec_accept_length"] = (
                self.cum_spec_accept_length / self.cum_spec_accept_count
            )
        if RECORD_STEP_TIME:
            ret["step_time_dict"] = self.step_time_dict

        ret["load"] = self.get_load()

        return GetInternalStateReqOutput(internal_state=ret)

    def set_internal_state(self, recv_req: SetInternalStateReq):
        server_args_dict = recv_req.server_args
        args_allow_update = set(
            [
                "max_micro_batch_size",
                "speculative_accept_threshold_single",
                "speculative_accept_threshold_acc",
            ]
        )
        if_success = True
        for k, v in server_args_dict.items():
            if k not in args_allow_update:
                logging.warning(f"Updating {k} is not supported.")
                if_success = False
                break
            elif k == "max_micro_batch_size" and (
                v > self.max_running_requests // self.pp_size or v < 1
            ):
                logging.warning(
                    f"Updating {k} to {v} is rejected because it is out "
                    f"of the valid range [1, {self.max_running_requests // self.pp_size}]."
                )
                if_success = False
                break
        if if_success:
            if not self.spec_algorithm.is_none() and self.cum_spec_accept_count > 0:
                avg_spec_accept_length = (
                    self.cum_spec_accept_length / self.cum_spec_accept_count
                )
                logger.info(f"{avg_spec_accept_length=}")
            self.cum_spec_accept_length = self.cum_spec_accept_count = 0
            for k, v in server_args_dict.items():
                global_server_args_dict[k] = v
            logger.info(f"Global server args updated! {global_server_args_dict=}")
        return SetInternalStateReqOutput(
            updated=True,
            server_args=global_server_args_dict,
        )

    def handle_rpc_request(self, recv_req: RpcReqInput):
        # Handle RPC requests
        logger.info(
            f"handle_rpc_request: {recv_req.method}, param: {recv_req.parameters}"
        )

        success = True
        exec = None
        try:
            func = getattr(self, recv_req.method)
            func(recv_req.parameters)
        except Exception as e:
            success = False
            exec = e
            logger.error(f"Failed to call rpc {recv_req.method}: {str(e)}")

        barrier()
        return RpcReqOutput(success, "" if not exec else str(exec))

    def abort_request(self, recv_req: AbortReq):
        # Delete requests in the waiting queue
        to_del = []
        for i, req in enumerate(self.waiting_queue):
            if recv_req.abort_all or req.rid.startswith(recv_req.rid):
                to_del.append(i)

        # Sort in reverse order to avoid index issues when deleting
        for i in reversed(to_del):
            # Abort method 1: directly pop from the queue
            # This only works for requests that have not started anything.
            # We still need to send something back to TokenizerManager to clean up the state.
            req = self.waiting_queue.pop(i)
            self.send_to_tokenizer.send_pyobj(AbortReq(req.rid))
            logger.debug(f"Abort queued request. {req.rid=}")

        # Delete the requests in the grammar queue
        for req in self.grammar_queue:
            # Abort method 2: call `set_finish_with_abort`
            # The request will still run one prefill forward pass.
            # In this case, we change the input_ids to be only one token to make this prefill cheap.
            if recv_req.abort_all or req.rid.startswith(recv_req.rid):
                logger.debug(f"Abort grammar queue request. {req.rid=}")
                if req.grammar:
                    req.grammar.cancel()
                req.set_finish_with_abort("Aborted by AbortReq.")

        # Delete requests not in the waiting queue when PD disaggregation is enabled
        if self.disaggregation_mode == DisaggregationMode.PREFILL:
            # Abort requests that have not yet been bootstrapped
            for i, req in enumerate(self.disagg_prefill_bootstrap_queue.queue):
                logger.debug(f"Abort bootstrap queue request. {req.rid=}")
                if recv_req.abort_all or req.rid.startswith(recv_req.rid):
                    if hasattr(req.disagg_kv_sender, "abort"):
                        req.disagg_kv_sender.abort()

            # Abort in-flight requests
            for i, req in enumerate(self.disagg_prefill_inflight_queue):
                logger.debug(f"Abort inflight queue request. {req.rid=}")
                if recv_req.abort_all or req.rid.startswith(recv_req.rid):
                    if hasattr(req.disagg_kv_sender, "abort"):
                        req.disagg_kv_sender.abort()

        elif self.disaggregation_mode == DisaggregationMode.DECODE:
            # Abort requests that have not yet finished preallocation
            for i, decode_req in enumerate(self.disagg_decode_prealloc_queue.queue):
                logger.debug(f"Abort prealloc queue request. {decode_req.req.rid=}")
                if recv_req.abort_all or decode_req.req.rid.startswith(recv_req.rid):
                    if hasattr(decode_req.kv_receiver, "abort"):
                        decode_req.kv_receiver.abort()

            # Abort requests waiting for kvcache to release tree cache
            for i, decode_req in enumerate(self.disagg_decode_transfer_queue.queue):
                logger.debug(f"Abort transfer queue request. {decode_req.req.rid=}")
                if recv_req.abort_all or decode_req.req.rid.startswith(recv_req.rid):
                    if hasattr(decode_req.kv_receiver, "abort"):
                        decode_req.kv_receiver.abort()

        # Delete requests in the running batch
        if self.cur_batch is self.running_batch or self.cur_batch is None:
            reqs = self.running_batch.reqs
        else:
            reqs = self.running_batch.reqs + self.cur_batch.reqs

        for req in reqs:
            if not req.finished() and (
                recv_req.abort_all or req.rid.startswith(recv_req.rid)
            ):
                # Abort method 3: set `to_abort=True`
                # The request will still run one decode forward pass.
                # Then we reuse all existing code to clean up the KV cache allocation.
                logger.debug(f"Abort running request. {req.rid=}")
                req.to_abort = True

    def _pause_engine(self) -> Tuple[List[Req], int]:
        raise NotImplementedError()

    def load_lora_adapter(
        self, recv_req: LoadLoRAAdapterReqInput
    ) -> LoadLoRAAdapterReqOutput:
        """Load a new LoRA adapter in place from disk or Hugging Face."""
        result = self.tp_worker.load_lora_adapter(recv_req)
        return result

    def unload_lora_adapter(
        self, recv_req: UnloadLoRAAdapterReqInput
    ) -> UnloadLoRAAdapterReqOutput:
        """Unload the LoRA adapter."""
        result = self.tp_worker.unload_lora_adapter(recv_req)
        return result

    def slow_down(self, recv_req: SlowDownReqInput):
        t = recv_req.forward_sleep_time
        if t is not None and t <= 0:
            t = None
        self.forward_sleep_time = t
        return SlowDownReqOutput()

    def expert_distribution_handle(self, recv_req: ExpertDistributionReq):
        if recv_req == ExpertDistributionReq.START_RECORD:
            get_global_expert_distribution_recorder().start_record()
        elif recv_req == ExpertDistributionReq.STOP_RECORD:
            get_global_expert_distribution_recorder().stop_record()
        elif recv_req == ExpertDistributionReq.DUMP_RECORD:
            get_global_expert_distribution_recorder().dump_record()
        else:
            raise ValueError(f"Unrecognized ExpertDistributionReq value: {recv_req=}")
        return ExpertDistributionReqOutput()

    def open_session(self, recv_req: OpenSessionReqInput):
        # handle error
        session_id = recv_req.session_id
        if session_id in self.sessions:
            logger.warning(f"session id {session_id} already exists, cannot open.")
            return OpenSessionReqOutput(session_id, False)
        elif session_id is None:
            logger.warning("session id is None, cannot open.")
            return OpenSessionReqOutput(session_id, False)
        else:
            self.sessions[session_id] = Session(
                recv_req.capacity_of_str_len, session_id
            )
            return OpenSessionReqOutput(session_id, True)

    def close_session(self, recv_req: CloseSessionReqInput):
        # handle error
        session_id = recv_req.session_id
        if session_id not in self.sessions:
            logger.warning(f"session id {session_id} does not exist, cannot delete.")
        else:
            del self.sessions[session_id]

    def get_print_prefix(self):
        prefix = ""
        if self.attn_dp_rank is not None:
            prefix += f" DP{self.attn_dp_rank}"
        if self.server_args.tp_size > 1:
            prefix += f" TP{self.tp_rank}"
        if self.pp_size > 1:
            prefix += f" PP{self.pp_rank}"
        return prefix

    def current_scheduler_metrics_enabled(self):
        return self.attn_tp_rank == 0 or self.enable_metrics_for_all_schedulers

    def maybe_sleep_on_idle(self):
        if self.idle_sleeper is not None:
            self.idle_sleeper.maybe_sleep()


class IdleSleeper:
    """
    In setups with long inactivity periods, it is desirable to reduce system
    power consumption while sglang does nothing. This brings not only power
    savings but also more CPU thermal headroom when a request eventually
    arrives, which matters when multiple GPUs are connected, since each GPU
    would otherwise pin one thread at 100% CPU usage.

    The simplest solution is to use zmq.Poller on all sockets that may
    receive data that needs handling immediately.
    """
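    # Hedged usage sketch (assumed wiring, not taken from this file): the
    # Scheduler is expected to construct this helper with its ZMQ receive
    # sockets and call `maybe_sleep()` from its idle path, roughly:
    #
    #     idle_sleeper = IdleSleeper([recv_from_tokenizer, recv_from_rpc])
    #     while True:
    #         ...
    #         idle_sleeper.maybe_sleep()
    #
    # `recv_from_tokenizer` / `recv_from_rpc` are hypothetical socket names;
    # see `Scheduler.maybe_sleep_on_idle` above for the actual call site.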
    def __init__(self, sockets):
        self.poller = zmq.Poller()
        self.last_empty_time = time.time()
        for s in sockets:
            self.poller.register(s, zmq.POLLIN)

    def maybe_sleep(self):
        # Block for at most 1000 ms waiting for any registered socket to become readable.
        self.poller.poll(1000)
        if (
            global_config.torch_empty_cache_interval > 0
            and time.time() - self.last_empty_time
            > global_config.torch_empty_cache_interval
        ):
            self.last_empty_time = time.time()
            torch.cuda.empty_cache()


def is_health_check_generate_req(recv_req):
    return getattr(recv_req, "rid", "").startswith("HEALTH_CHECK")


def is_work_request(recv_req):
    return isinstance(recv_req, (TokenizedGenerateReqInput, TokenizedEmbeddingReqInput))


def run_scheduler_process(
    server_args: ServerArgs,
    port_args: PortArgs,
    gpu_id: int,
    tp_rank: int,
    moe_ep_rank: int,
    pp_rank: int,
    dp_rank: Optional[int],
    pipe_writer,
    balance_meta: Optional[DPBalanceMeta] = None,
):
    # Generate the prefix
    prefix = ""
    if dp_rank is not None:
        prefix += f" DP{dp_rank}"
    if server_args.tp_size > 1:
        prefix += f" TP{tp_rank}"
    if server_args.ep_size > 1:
        prefix += f" EP{moe_ep_rank}"
    if server_args.pp_size > 1:
        prefix += f" PP{pp_rank}"

    # Config the process
    setproctitle.setproctitle(f"sglang::scheduler{prefix.replace(' ', '_')}")
    faulthandler.enable()
    kill_itself_when_parent_died()
    parent_process = psutil.Process().parent()

    # [For Router] if the env var "SGLANG_DP_RANK" exists, set dp_rank to its value
    if dp_rank is None and "SGLANG_DP_RANK" in os.environ:
        dp_rank = int(os.environ["SGLANG_DP_RANK"])

    # Configure the logger
    configure_logger(server_args, prefix=prefix)
    suppress_other_loggers()

    # Set cpu affinity to this gpu process
    if get_bool_env_var("SGLANG_SET_CPU_AFFINITY"):
        set_gpu_proc_affinity(server_args.tp_size, server_args.nnodes, gpu_id)

    # Create a scheduler and run the event loop
    try:
        scheduler = Scheduler(
            server_args,
            port_args,
            gpu_id,
            tp_rank,
            moe_ep_rank,
            pp_rank,
            dp_rank,
            dp_balance_meta=balance_meta,
        )
        pipe_writer.send(
            {
                "status": "ready",
                "max_total_num_tokens": scheduler.max_total_num_tokens,
                "max_req_input_len": scheduler.max_req_input_len,
            }
        )

        disaggregation_mode: DisaggregationMode = scheduler.disaggregation_mode
        if disaggregation_mode == DisaggregationMode.NULL:
            if server_args.pp_size > 1:
                scheduler.event_loop_pp()
            elif scheduler.enable_overlap:
                scheduler.event_loop_overlap()
            else:
                scheduler.event_loop_normal()
        elif disaggregation_mode == DisaggregationMode.PREFILL:
            if scheduler.enable_overlap:
                scheduler.event_loop_overlap_disagg_prefill()
            else:
                scheduler.event_loop_normal_disagg_prefill()
        elif disaggregation_mode == DisaggregationMode.DECODE:
            if scheduler.enable_overlap:
                scheduler.event_loop_overlap_disagg_decode()
            else:
                scheduler.event_loop_normal_disagg_decode()
    except Exception:
        traceback = get_exception_traceback()
        logger.error(f"Scheduler hit an exception: {traceback}")
        parent_process.send_signal(signal.SIGQUIT)
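# Hedged illustration (not part of this module): elsewhere in the SGLang
# codebase, `run_scheduler_process` is typically spawned as a subprocess,
# roughly along these lines:
#
#     import multiprocessing as mp
#     reader, writer = mp.Pipe(duplex=False)
#     proc = mp.Process(
#         target=run_scheduler_process,
#         args=(server_args, port_args, gpu_id, tp_rank, moe_ep_rank, pp_rank, dp_rank, writer),
#     )
#     proc.start()
#     assert reader.recv()["status"] == "ready"  # matches the pipe_writer.send(...) above
#
# where `server_args` / `port_args` are pre-built ServerArgs / PortArgs
# instances; the exact launcher lives outside this file.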