vllm.py 8.03 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

16
import argparse
17
18
import json
import logging
19
from dataclasses import field
20
from typing import Any, AsyncGenerator, List, Optional
21
22

import numpy as np
23
24
25

from triton_distributed.icp.data_plane import DataPlane
from triton_distributed.icp.request_plane import RequestPlane
26
from triton_distributed.runtime import (
27
28
29
30
    Operator,
    RemoteInferenceRequest,
    RemoteInferenceResponse,
    RemoteOperator,
31
)
32
33

from .stages import AggregatedStage, GenerateStage, PrefillStage, Stage
34
35


36
class VllmOperator(Operator):
    """Triton-distributed operator serving vLLM in aggregated mode.

    A single :class:`AggregatedStage` performs both prefill and token
    generation on the same engine. Requests arrive over the request plane,
    tensor payloads travel over the data plane, and responses are streamed
    back through each request's response sender.
    """

    def __init__(
        self,
        name: str,
        version: int,
        request_plane: RequestPlane,
        data_plane: DataPlane,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
        repository: Optional[str] = None,
        logger: Optional[logging.Logger] = None,
        triton_core: Optional[Any] = None,
    ):
        """Create the operator and build its vLLM stage(s).

        Args:
            name: Operator name as registered on the request plane.
            version: Operator version.
            request_plane: Transport used to receive requests and send responses.
            data_plane: Transport used to move tensor payloads.
            parameters: Flat configuration mapping forwarded to the stage
                (model name, tensor parallelism, engine flags, ...).
                ``None`` is treated as an empty mapping.
            repository: Unused here; accepted for operator-interface compatibility.
            logger: Diagnostics sink; defaults to this module's logger.
            triton_core: Unused here; accepted for operator-interface compatibility.
        """
        # BUG FIX: the previous default, ``field(default_factory=dict)``, is
        # only meaningful inside a @dataclass. As a plain function default it
        # evaluates to a ``dataclasses.Field`` object, which would later crash
        # ``argparse.Namespace(**parameters)``. ``None`` plus normalization
        # below restores the intended "empty dict" behavior.
        self.name = name
        self.version = version
        self.request_plane = request_plane
        self.data_plane = data_plane
        self.logger = logger if logger is not None else logging.getLogger(__name__)
        # Populated by _init_stages; annotated for type checkers.
        self._stage: Stage

        self._init_stages(parameters if parameters is not None else {})

    async def execute(self, requests: List[RemoteInferenceRequest]) -> None:
        """Run each request through the stage, streaming responses back.

        Any exception raised while processing a request is reported to the
        client as a final error response instead of propagating, so one bad
        request does not take down the operator.
        """
        for request in requests:
            response_sender = request.response_sender()
            try:
                inputs, parameters = self._prepare_inputs(request)
                self.logger.debug("Processing request")
                async for response in self._stage(
                    {
                        "inputs": inputs,
                        "parameters": parameters,
                    }
                ):
                    self.logger.debug("Sending response")
                    # Stage responses are kwargs dicts for the sender
                    # (outputs / parameters / final / error).
                    await response_sender.send(**response)
                    self.logger.debug("Response send")
            except Exception as e:
                self.logger.error(f"Error processing request: {e}")
                await response_sender.send(error=e, final=True)

    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        """Build the aggregated (prefill + generate) vLLM stage from config.

        Raises:
            AttributeError: if a required key is missing from ``parameters``.
        """
        # BUG FIX: ``None`` default instead of the invalid
        # ``field(default_factory=dict)`` (a ``dataclasses.Field`` object).
        # Namespace gives attribute-style access to the flat parameter dict.
        args = argparse.Namespace(**(parameters if parameters is not None else {}))  # type: ignore
        self._stage = AggregatedStage(
            model=args.model_name,
            tensor_parallel_size=args.baseline_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )

    @staticmethod
    def _prepare_inputs(request: RemoteInferenceRequest):
        """Convert a remote request into ``(inputs, parameters)`` dicts.

        Input tensors are materialized as numpy arrays via DLPack (zero-copy
        where the producer allows it). String parameter values prefixed with
        ``"JSON:"`` are decoded from JSON; all other values pass through.
        """
        inputs, parameters = {}, {}
        for input_name, input_data in request.inputs.items():
            inputs[input_name] = np.from_dlpack(input_data)
        for key, value in request.parameters.items():
            if isinstance(value, str) and value.startswith("JSON:"):
                parameters[key] = json.loads(value[5:])
            else:
                parameters[key] = value
        return inputs, parameters
115
116


117
118
class VllmContextOperator(VllmOperator):
    """Disaggregated context (prefill) operator.

    Runs the prefill stage locally, then forwards the prefill output (which
    includes transferred KV-cache metadata produced by the stage) to a remote
    "generate" operator, relaying the generated responses back to the client.
    """

    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        """Build the local prefill stage and a handle to the remote generator.

        Raises:
            AttributeError: if a required key is missing from ``parameters``.
        """
        # BUG FIX: default is ``None`` instead of the invalid
        # ``field(default_factory=dict)``, which is only meaningful inside a
        # @dataclass; as a plain default it is a ``dataclasses.Field`` object
        # and ``Namespace(**parameters)`` would raise on it.
        args = argparse.Namespace(**(parameters if parameters is not None else {}))  # type: ignore
        self._prefill_stage = PrefillStage(
            model=args.model_name,
            tensor_parallel_size=args.context_tp_size,
            # Prefill must know the generate side's TP size so KV transfer
            # layouts line up.
            generate_tensor_parallel_size=args.generate_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )
        # Remote operator named "generate" reached over the same planes.
        self._generate_operator = RemoteOperator(
            "generate", self.request_plane, self.data_plane
        )

    async def execute(self, requests: List[RemoteInferenceRequest]) -> None:
        """Prefill locally, then stream generation from the remote operator.

        Any exception is reported to the client as a final error response
        instead of propagating.
        """
        for request in requests:
            response_sender = request.response_sender()
            try:
                self.logger.info("Processing request")
                inputs, parameters = self._prepare_inputs(request)
                responses = [
                    response
                    async for response in self._prefill_stage(
                        {
                            "inputs": inputs,
                            "parameters": parameters,
                        }
                    )
                ]
                self.logger.info("Prefill finished")
                # Prefill is expected to yield exactly one response carrying
                # the outputs/parameters needed by the generate side.
                assert len(responses) == 1
                response = responses[0]
                self.logger.info("Processing generate")
                generate_responses: AsyncGenerator[
                    RemoteInferenceResponse, None
                ] = await self._generate_operator.async_infer(
                    inputs=response["outputs"],
                    # Prefill-produced parameters take precedence over the
                    # client's originals on key collision.
                    parameters={**request.parameters, **response["parameters"]},
                )
                async for generate_response in generate_responses:
                    self.logger.info("Sending response")
                    # Renamed from ``parameters`` to avoid shadowing the
                    # request-level parameters above; only the generated text
                    # is forwarded to the client.
                    text_parameters = {"text": generate_response.parameters["text"]}
                    await response_sender.send(
                        outputs=generate_response.outputs,
                        parameters=text_parameters,
                        final=generate_response.final,
                        error=generate_response.error,
                    )
                    self.logger.info("Response send")
            except Exception as e:
                self.logger.error(f"Error processing request: {e}")
                await response_sender.send(error=e, final=True)
183
184


185
186
class VllmGenerateOperator(VllmOperator):
    """Disaggregated generate (decode) operator.

    Reuses :meth:`VllmOperator.execute`; only the stage construction differs —
    a :class:`GenerateStage` consuming prefill output (KV cache handed over
    from the context operator) instead of an aggregated stage.
    """

    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        """Build the generate-only vLLM stage from config.

        Raises:
            AttributeError: if a required key is missing from ``parameters``.
        """
        # BUG FIX: default is ``None`` instead of the invalid
        # ``field(default_factory=dict)``, which outside a @dataclass
        # evaluates to a ``dataclasses.Field`` object and would crash
        # ``Namespace(**parameters)``.
        args = argparse.Namespace(**(parameters if parameters is not None else {}))  # type: ignore
        # Fixed worker identity; must match the name the context operator
        # targets via its RemoteOperator("generate", ...).
        args.worker_name = "generate"
        self._stage = GenerateStage(
            model=args.model_name,
            tensor_parallel_size=args.generate_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )