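"""vLLM operators for triton_distributed.

Defines an aggregated operator that runs prefill and generation in a single
stage, plus a disaggregated context (prefill) / generate operator pair that
hand work off through the request and data planes.
"""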
import argparse
import json
import logging
from typing import Any, AsyncGenerator, List, Optional

import numpy as np

from triton_distributed.icp.data_plane import DataPlane
from triton_distributed.icp.request_plane import RequestPlane
from triton_distributed.runtime import (
    Operator,
    RemoteInferenceRequest,
    RemoteInferenceResponse,
    RemoteOperator,
)

from .stages import AggregatedStage, GenerateStage, PrefillStage, Stage


class VllmOperator(Operator):
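    """Serves a vLLM model as a single aggregated stage.

    Prefill and generation run together in one AggregatedStage; subclasses
    override _init_stages to split the pipeline across workers.
    """
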
    def __init__(
        self,
        name: str,
        version: int,
        request_plane: RequestPlane,
        data_plane: DataPlane,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
        repository: Optional[str] = None,
        logger: Optional[logging.Logger] = None,
        triton_core: Optional[Any] = None,
    ):
        self.name = name
        self.version = version
        self.request_plane = request_plane
        self.data_plane = data_plane
        if logger is None:
            self.logger = logging.getLogger(__name__)
        else:
            self.logger = logger
        self._stage: Stage

        self._init_stages(parameters)

    async def execute(self, requests: List[RemoteInferenceRequest]) -> None:
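        """Run each request through the stage and stream responses back."""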
        for request in requests:
            response_sender = request.response_sender()
            try:
                inputs, parameters = self._prepare_inputs(request)
                self.logger.debug("Processing request")
                async for response in self._stage(
                    {
                        "inputs": inputs,
                        "parameters": parameters,
                    }
                ):
                    self.logger.debug("Sending response")
                    await response_sender.send(**response)
                    self.logger.debug("Response send")
            except Exception as e:
                self.logger.error(f"Error processing request: {e}")
                await response_sender.send(error=e, final=True)

    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        args = argparse.Namespace(**(parameters or {}))  # type: ignore
        self._stage = AggregatedStage(
            model=args.model_name,
            tensor_parallel_size=args.baseline_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )

    @staticmethod
    def _prepare_inputs(request: RemoteInferenceRequest):
        inputs, parameters = {}, {}
        for input_name, input_data in request.inputs.items():
            inputs[input_name] = np.from_dlpack(input_data)
        for key, value in request.parameters.items():
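            # Values prefixed with "JSON:" carry serialized JSON payloads;
            # everything else is passed through unchanged.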
            if isinstance(value, str) and value.startswith("JSON:"):
                parameters[key] = json.loads(value[5:])
            else:
                parameters[key] = value
        return inputs, parameters


class VllmContextOperator(VllmOperator):
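    """Context (prefill) half of a disaggregated vLLM deployment.

    Runs the prefill stage locally, then forwards the result to a remote
    "generate" operator and streams its responses back to the client.
    """
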
    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        args = argparse.Namespace(**(parameters or {}))  # type: ignore
        self._prefill_stage = PrefillStage(
            model=args.model_name,
            tensor_parallel_size=args.context_tp_size,
            generate_tensor_parallel_size=args.generate_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )
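        # Remote handle to the generate operator, reached over the request
        # and data planes.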
        self._generate_operator = RemoteOperator(
            "generate", self.request_plane, self.data_plane
        )

    async def execute(self, requests: List[RemoteInferenceRequest]) -> None:
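        """Prefill locally, then delegate generation to the remote operator."""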
        for request in requests:
            response_sender = request.response_sender()
            try:
                self.logger.info("Processing request")
                inputs, parameters = self._prepare_inputs(request)
                responses = [
                    response
                    async for response in self._prefill_stage(
                        {
                            "inputs": inputs,
                            "parameters": parameters,
                        }
                    )
                ]
                self.logger.info("Prefill finished")
                assert len(responses) == 1
                response = responses[0]
                self.logger.info("Processing generate")
                generate_responses: AsyncGenerator[
                    RemoteInferenceResponse, None
                ] = await self._generate_operator.async_infer(
                    inputs=response["outputs"],
                    parameters={**request.parameters, **response["parameters"]},
                )
                async for generate_response in generate_responses:
                    self.logger.info("Sending response")
                    parameters = {"text": generate_response.parameters["text"]}
                    await response_sender.send(
                        outputs=generate_response.outputs,
                        parameters=parameters,
                        final=generate_response.final,
                        error=generate_response.error,
                    )
                    self.logger.info("Response send")
            except Exception as e:
                self.logger.error(f"Error processing request: {e}")
                await response_sender.send(error=e, final=True)


class VllmGenerateOperator(VllmOperator):
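    """Generate (decode) half of a disaggregated vLLM deployment."""
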
    def _init_stages(
        self,
        parameters: Optional[dict[str, str | int | bool | bytes]] = None,
    ):
        args = argparse.Namespace(**(parameters or {}))  # type: ignore
        args.worker_name = "generate"
        self._stage = GenerateStage(
            model=args.model_name,
            tensor_parallel_size=args.generate_tp_size,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            dtype=args.dtype,
            kv_cache_dtype=args.kv_cache_dtype,
            enable_prefix_caching=args.enable_prefix_caching,
            enable_chunked_prefill=args.enable_chunked_prefill,
            enforce_eager=args.enforce_eager,
            ignore_eos=args.ignore_eos,
            max_num_seqs=args.max_num_seqs,
            disable_async_output_proc=args.disable_async_output_proc,
            disable_log_stats=args.disable_log_stats,
        )