# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor parallel worker."""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Optional

import torch

from sglang.srt.configs.model_config import ModelConfig
from sglang.srt.distributed import get_pp_group, get_world_group
from sglang.srt.managers.io_struct import (
    DestroyWeightsUpdateGroupReqInput,
    GetWeightsByNameReqInput,
    InitWeightsSendGroupForRemoteInstanceReqInput,
    InitWeightsUpdateGroupReqInput,
    LoadLoRAAdapterReqInput,
    SendWeightsToRemoteInstanceReqInput,
    UnloadLoRAAdapterReqInput,
    UpdateWeightFromDiskReqInput,
    UpdateWeightsFromDistributedReqInput,
    UpdateWeightsFromTensorReqInput,
)
from sglang.srt.managers.schedule_batch import ModelWorkerBatch
from sglang.srt.managers.scheduler import GenerationBatchResult
from sglang.srt.mem_cache.allocator import BaseTokenToKVPoolAllocator
from sglang.srt.mem_cache.memory_pool import ReqToTokenPool
from sglang.srt.model_executor.forward_batch_info import ForwardBatch, PPProxyTensors
from sglang.srt.model_executor.model_runner import ModelRunner
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import MultiprocessingSerializer, broadcast_pyobj, set_random_seed
from sglang.srt.utils.hf_transformers_utils import (
    get_processor,
    get_tokenizer,
    get_tokenizer_from_processor,
)
from sglang.srt.utils.patch_torch import monkey_patch_torch_reductions

if TYPE_CHECKING:
    from sglang.srt.managers.cache_controller import LayerDoneCounter

logger = logging.getLogger(__name__)


class TpModelWorker:
    """A tensor parallel model worker."""

    def __init__(
        self,
        server_args: ServerArgs,
        gpu_id: int,
        tp_rank: int,
        moe_ep_rank: int,
        pp_rank: int,
        dp_rank: Optional[int],
        nccl_port: int,
        is_draft_worker: bool = False,
        req_to_token_pool: Optional[ReqToTokenPool] = None,
        token_to_kv_pool_allocator: Optional[BaseTokenToKVPoolAllocator] = None,
    ):
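        """Build the model runner, load the tokenizer/processor, derive batching
        limits from the memory pools, and sync the random seed across TP ranks."""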
        # Parse args
        self.tp_size = server_args.tp_size
        self.tp_rank = tp_rank
        self.moe_ep_rank = moe_ep_rank
        self.pp_rank = pp_rank

        # Init model and tokenizer
        self.model_config = ModelConfig.from_server_args(
            server_args,
            model_path=(
                server_args.model_path
                if not is_draft_worker
                else server_args.speculative_draft_model_path
            ),
            model_revision=(
                server_args.revision
                if not is_draft_worker
                else server_args.speculative_draft_model_revision
            ),
            is_draft_model=is_draft_worker,
        )

        self.model_runner = ModelRunner(
            model_config=self.model_config,
            mem_fraction_static=server_args.mem_fraction_static,
            gpu_id=gpu_id,
            tp_rank=tp_rank,
            tp_size=server_args.tp_size,
            moe_ep_rank=moe_ep_rank,
            moe_ep_size=server_args.ep_size,
            pp_rank=pp_rank,
            pp_size=server_args.pp_size,
            nccl_port=nccl_port,
            dp_rank=dp_rank,
            server_args=server_args,
            is_draft_worker=is_draft_worker,
            req_to_token_pool=req_to_token_pool,
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
        )
        if server_args.skip_tokenizer_init:
            self.tokenizer = self.processor = None
        else:
            if self.model_config.is_multimodal:
                self.processor = get_processor(
                    server_args.tokenizer_path,
                    tokenizer_mode=server_args.tokenizer_mode,
                    trust_remote_code=server_args.trust_remote_code,
                    revision=server_args.revision,
                )
                self.tokenizer = get_tokenizer_from_processor(self.processor)
            else:
                self.tokenizer = get_tokenizer(
                    server_args.tokenizer_path,
                    tokenizer_mode=server_args.tokenizer_mode,
                    trust_remote_code=server_args.trust_remote_code,
                    revision=server_args.revision,
                )
        self.device = self.model_runner.device

        # Init nccl groups
        self.pp_group = get_pp_group()
        self.world_group = get_world_group()

        # Profile number of tokens
        self.max_total_num_tokens = self.model_runner.max_total_num_tokens
        self.max_prefill_tokens = server_args.max_prefill_tokens
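        # Without an explicit max_running_requests, default to half the KV-token
        # budget; a user-provided limit is split across DP ranks when DP attention
        # is enabled. Either way it is capped by the request pool size.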
        self.max_running_requests = min(
            (
                self.max_total_num_tokens // 2
                if server_args.max_running_requests is None
                else server_args.max_running_requests
                // (server_args.dp_size if server_args.enable_dp_attention else 1)
            ),
            self.model_runner.req_to_token_pool.size,
        )
        assert self.max_running_requests > 0, "max_running_requests is zero"
        self.max_queued_requests = server_args.max_queued_requests
        assert (
            self.max_queued_requests is None or self.max_queued_requests >= 1
        ), "If configured, max_queued_requests must be at least 1 for any work to be scheduled."
        self.max_req_len = min(
            self.model_config.context_len - 1,
            self.max_total_num_tokens - 1,
        )
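        # Keep a small margin so request inputs stay safely below max_req_len.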
        self.max_req_input_len = self.max_req_len - 5
        assert (
            self.max_req_len > 0 and self.max_req_input_len > 0
        ), "Memory pool size is too small"

        # Sync random seed across TP workers
        self.random_seed = broadcast_pyobj(
            [server_args.random_seed],
            self.tp_size * self.pp_rank + tp_rank,
            self.world_group.cpu_group,
            src=self.world_group.ranks[0],
        )[0]
        set_random_seed(self.random_seed)

        self.hicache_layer_transfer_counter = None

    def register_hicache_layer_transfer_counter(self, counter: LayerDoneCounter):
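        # Store the layer-transfer counter of the hierarchical KV cache (hicache)
        # so this worker can track layer-wise KV transfers.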
        self.hicache_layer_transfer_counter = counter

    def set_hicache_consumer(self, consumer_index: int):
        if self.hicache_layer_transfer_counter is not None:
            self.hicache_layer_transfer_counter.set_consumer(consumer_index)

    def get_worker_info(self):
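        # Bundle the worker's limits, random seed, device, and memory-pool sizes
        # so callers such as the scheduler can configure batching.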
        return (
            self.max_total_num_tokens,
            self.max_prefill_tokens,
            self.max_running_requests,
            self.max_queued_requests,
            self.max_req_len,
            self.max_req_input_len,
            self.random_seed,
            self.device,
            self.model_runner.req_to_token_pool.size,
            self.model_runner.req_to_token_pool.max_context_len,
            self.model_runner.token_to_kv_pool.size,
        )

    @property
    def sliding_window_size(self) -> Optional[int]:
        return self.model_runner.sliding_window_size

    @property
    def is_hybrid(self) -> bool:
        return self.model_runner.is_hybrid is not None

    def get_tokens_per_layer_info(self):
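        # Separate token capacities for full-attention and sliding-window (SWA)
        # layers, relevant for hybrid-attention models.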
        return (
            self.model_runner.full_max_total_num_tokens,
            self.model_runner.swa_max_total_num_tokens,
        )

    def get_pad_input_ids_func(self):
        return getattr(self.model_runner.model, "pad_input_ids", None)

    def get_tp_group(self):
        return self.model_runner.tp_group

    def get_attention_tp_group(self):
        return self.model_runner.attention_tp_group

    def get_attention_tp_cpu_group(self):
        return getattr(self.model_runner.attention_tp_group, "cpu_group", None)

    def get_memory_pool(self):
        return (
            self.model_runner.req_to_token_pool,
            self.model_runner.token_to_kv_pool_allocator,
        )

    def forward_batch_generation(
        self,
        model_worker_batch: ModelWorkerBatch,
        forward_batch: Optional[ForwardBatch] = None,
        is_verify: bool = False,
        skip_attn_backend_init: bool = False,
    ) -> GenerationBatchResult:
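        """Run one forward pass for a generation batch.

        Ranks after the first pipeline stage first receive hidden-state proxy
        tensors from the previous stage. The last rank computes logits and,
        unless this is a verify pass, a delayed sample launch, or a prefill-only
        batch, samples the next token ids; intermediate ranks return their proxy
        tensors instead.
        """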
        # FIXME(lsyin): maybe remove skip_attn_backend_init in forward_batch_generation,
        #               which requires preparing replay to always be in this function

        if model_worker_batch is not None:
            # Update the hicache consumer index for the running batch
            self.set_hicache_consumer(model_worker_batch.hicache_consumer_index)

            forward_batch = ForwardBatch.init_new(model_worker_batch, self.model_runner)
        else:
            # FIXME(lsyin): unify the interface of forward_batch
            assert forward_batch is not None

        pp_proxy_tensors = None
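        # Ranks after the first pipeline stage receive the hidden states produced
        # by the previous stage before running their own layers.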
        if not self.pp_group.is_first_rank:
            pp_proxy_tensors = PPProxyTensors(
                self.pp_group.recv_tensor_dict(
                    all_gather_group=self.get_attention_tp_group()
                )
            )

        if self.pp_group.is_last_rank:
            logits_output, can_run_cuda_graph = self.model_runner.forward(
                forward_batch,
                pp_proxy_tensors=pp_proxy_tensors,
                skip_attn_backend_init=skip_attn_backend_init,
            )
            batch_result = GenerationBatchResult(
                logits_output=logits_output,
                can_run_cuda_graph=can_run_cuda_graph,
            )

            if is_verify:
                # Skip sampling and return logits for target forward
                return batch_result

            if model_worker_batch.delay_sample_launch:
                batch_result.delay_sample_launch = True
                batch_result.forward_batch = forward_batch
                return batch_result

            if model_worker_batch.is_prefill_only:
                # For prefill-only requests, create dummy token IDs on CPU
                # The size should match the batch size (number of sequences), not total tokens
                batch_result.next_token_ids = torch.zeros(
                    len(model_worker_batch.seq_lens),
                    dtype=torch.long,
                    device=model_worker_batch.input_ids.device,
                )
                if (
                    model_worker_batch.return_logprob
                    and logits_output.next_token_logits is not None
                ):
                    # NOTE: Compute logprobs without full sampling
                    self.model_runner.compute_logprobs_only(
                        logits_output, model_worker_batch
                    )
            else:
                batch_result.next_token_ids = self.model_runner.sample(
                    logits_output, forward_batch
                )

            return batch_result
        else:
            pp_proxy_tensors, can_run_cuda_graph = self.model_runner.forward(
                forward_batch,
                pp_proxy_tensors=pp_proxy_tensors,
                skip_attn_backend_init=skip_attn_backend_init,
            )
            return GenerationBatchResult(
                pp_hidden_states_proxy_tensors=pp_proxy_tensors,
                can_run_cuda_graph=can_run_cuda_graph,
            )

    def forward_batch_embedding(self, model_worker_batch: ModelWorkerBatch):
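        # Embedding batches need no sampling: run the model and return the
        # embeddings from its output.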
        forward_batch = ForwardBatch.init_new(model_worker_batch, self.model_runner)
        logits_output, _ = self.model_runner.forward(forward_batch)
        embeddings = logits_output.embeddings
        return embeddings

    def update_weights_from_disk(self, recv_req: UpdateWeightFromDiskReqInput):
        success, message = self.model_runner.update_weights_from_disk(
            recv_req.model_path, recv_req.load_format
        )
        return success, message

    def init_weights_update_group(self, recv_req: InitWeightsUpdateGroupReqInput):
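        # Join a user-defined process group (typically with an external training
        # job) that later serves update_weights_from_distributed().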
        success, message = self.model_runner.init_weights_update_group(
            recv_req.master_address,
            recv_req.master_port,
            recv_req.rank_offset,
            recv_req.world_size,
            recv_req.group_name,
            recv_req.backend,
        )
        return success, message

    def destroy_weights_update_group(self, recv_req: DestroyWeightsUpdateGroupReqInput):
        success, message = self.model_runner.destroy_weights_update_group(
            recv_req.group_name,
        )
        return success, message

    def init_weights_send_group_for_remote_instance(
        self, recv_req: InitWeightsSendGroupForRemoteInstanceReqInput
    ):
        success, message = (
            self.model_runner.init_weights_send_group_for_remote_instance(
                recv_req.master_address,
                recv_req.ports,
                recv_req.group_rank,
                recv_req.world_size,
                recv_req.group_name,
                recv_req.backend,
            )
        )
        return success, message

    def send_weights_to_remote_instance(
        self, recv_req: SendWeightsToRemoteInstanceReqInput
    ):
        success, message = self.model_runner.send_weights_to_remote_instance(
            recv_req.master_address,
            recv_req.ports,
            recv_req.group_name,
        )
        return success, message

    def update_weights_from_distributed(
        self, recv_req: UpdateWeightsFromDistributedReqInput
    ):
        success, message = self.model_runner.update_weights_from_distributed(
            recv_req.names, recv_req.dtypes, recv_req.shapes, recv_req.group_name
        )
        return success, message

    def update_weights_from_tensor(self, recv_req: UpdateWeightsFromTensorReqInput):
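        # Patch torch's multiprocessing reductions so tensors serialized by the
        # sending process can be rebuilt in this worker before loading them.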
        monkey_patch_torch_reductions()
        success, message = self.model_runner.update_weights_from_tensor(
            named_tensors=MultiprocessingSerializer.deserialize(
                recv_req.serialized_named_tensors[self.tp_rank]
            ),
            load_format=recv_req.load_format,
        )
        return success, message

    def get_weights_by_name(self, recv_req: GetWeightsByNameReqInput):
        parameter = self.model_runner.get_weights_by_name(
            recv_req.name, recv_req.truncate_size
        )
        return parameter

    def load_lora_adapter(self, recv_req: LoadLoRAAdapterReqInput):
        result = self.model_runner.load_lora_adapter(recv_req.to_ref())
        return result

    def unload_lora_adapter(self, recv_req: UnloadLoRAAdapterReqInput):
        result = self.model_runner.unload_lora_adapter(recv_req.to_ref())
        return result

    def can_run_lora_batch(self, lora_ids: list[str]) -> bool:
        return self.model_runner.lora_manager.validate_lora_batch(lora_ids)