# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import asyncio
import logging
import os
import signal
import sys

from pydantic import BaseModel
from utils.nixl import NixlMetadataStore
from utils.prefill_queue import PrefillQueue
from utils.vllm import parse_vllm_args
from vllm.entrypoints.openai.api_server import (
    build_async_engine_client_from_engine_args,
)
from vllm.inputs.data import TokensPrompt
from vllm.remote_prefill import RemotePrefillParams, RemotePrefillRequest

from dynamo.sdk import async_on_start, dynamo_context, dynamo_endpoint, service

logger = logging.getLogger(__name__)


class RequestType(BaseModel):
    text: str


@service(
    dynamo={
        "namespace": "dynamo",
    },
    resources={"gpu": 1, "cpu": "10", "memory": "20Gi"},
    workers=1,
)
class PrefillWorker:
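    """Prefill worker for disaggregated serving.

    Pulls remote prefill requests off a shared NATS queue, runs the prompt
    forward pass on a local vLLM engine, and hands the resulting KV cache
    off to the requesting decode engine over NIXL.
    """
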
    def __init__(self):
        class_name = self.__class__.__name__
        self.engine_args = parse_vllm_args(class_name, "")
        self._loaded_metadata = set()
        self.initialized = False
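        # The disaggregated prefill path does not yet support several engine
        # features; override any user-supplied values and log each change.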
        if self.engine_args.enable_chunked_prefill is not False:
            logger.info("Chunked prefill is not supported yet, setting to False")
            self.engine_args.enable_chunked_prefill = False

        if self.engine_args.pipeline_parallel_size != 1:
            logger.info("Pipeline parallel size is not supported yet, setting to 1")
            self.engine_args.pipeline_parallel_size = 1

        if self.engine_args.disable_async_output_proc is not True:
            logger.info("Async output processing is not supported yet, setting to True")
            self.engine_args.disable_async_output_proc = True

        if self.engine_args.enforce_eager is not True:
            logger.info("Prefill must be done eagerly, setting to True")
            self.engine_args.enforce_eager = True

        if self.engine_args.enable_prefix_caching is not False:
            logger.info(
                "Prefix caching is not supported yet in prefill worker, setting to False"
            )
            self.engine_args.enable_prefix_caching = False

    @async_on_start
    async def async_init(self):
        self._engine_context = build_async_engine_client_from_engine_args(
            self.engine_args
        )
        if self._engine_context is not None:
            self.engine_client = await self._engine_context.__aenter__()
        else:
            raise RuntimeError("Failed to initialize engine client")
        runtime = dynamo_context["runtime"]
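        # Publish this engine's NIXL metadata so decode engines can discover it
        # and set up KV-cache transfer to this worker.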
        metadata = self.engine_client.nixl_metadata
        self._metadata_store = NixlMetadataStore("dynamo", runtime)
        await self._metadata_store.put(metadata.engine_id, metadata)
        self.task = asyncio.create_task(self.prefill_queue_handler())

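        # Surface the consumer task's outcome: log a clean exit, and terminate
        # the process on unexpected failure so the deployment can restart it.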
        def prefill_queue_handler_cb(fut):
            try:
                fut.result()
                logger.info("prefill queue handler exited successfully")
            except Exception as e:
                logger.error(f"[ERROR] prefill queue handler failed: {e!r}")
                sys.exit(1)

        self.task.add_done_callback(prefill_queue_handler_cb)

        self.shutdown_requested = False

        # Set up signal handler for graceful shutdown
        # TODO: move to dynamo sdk
        loop = asyncio.get_running_loop()

        def signal_handler():
            # Schedule the shutdown coroutine instead of calling it directly,
            # keeping a reference so the task is not garbage-collected mid-run.
            self._shutdown_task = asyncio.create_task(self.graceful_shutdown(runtime))

        for sig in (signal.SIGTERM, signal.SIGINT):
            loop.add_signal_handler(sig, signal_handler)

        logger.info("PrefillWorker initialized")

    async def graceful_shutdown(self, runtime):
        logger.info("Received shutdown signal, shutting down DistributedRuntime")
        # First shut down the vLLM engine: prefill_queue_handler sees this flag,
        # drains in-flight work, and calls shutdown_vllm_engine().
        self.shutdown_requested = True
        await asyncio.wait_for(self.task, timeout=None)

        # Then shut down the distributed runtime.
        runtime.shutdown()
        logger.info("DistributedRuntime shutdown complete")

    def shutdown_vllm_engine(self):
        """Shutdown the background loop"""
        logger.info("Shutting down vllm engine")
        # We are inside the running loop (called from prefill_queue_handler).
        loop = asyncio.get_running_loop()
        try:
            self.engine_client.close()
            logger.info("PrefillWorker shutdown complete")
        except Exception as e:
            logger.error(f"Error during shutdown: {e}")
        finally:
            loop.stop()

    async def prefill_queue_handler(self):
        logger.info("Prefill queue handler entered")
        prefill_queue_nats_server = os.getenv("NATS_SERVER", "nats://localhost:4222")
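        # One queue stream per dynamo namespace, so multiple deployments can
        # share a single NATS server without interfering.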
        namespace, _ = PrefillWorker.dynamo_address()  # type: ignore
        prefill_queue_stream_name = f"{namespace}_prefill_queue"
        logger.info(
            f"Prefill queue: {prefill_queue_nats_server}:{prefill_queue_stream_name}"
        )
        self.initialized = True
        # TODO: integrate prefill_queue to a dynamo endpoint
        async with PrefillQueue.get_instance(
            nats_server=prefill_queue_nats_server,
            stream_name=prefill_queue_stream_name,
        ) as prefill_queue:
            logger.info("prefill queue handler started")
            while True:
                # TODO: pulling prefill requests from NATS may add a small
                # overhead; measure how much it is in practice.
                prefill_request = await prefill_queue.dequeue_prefill_request()
                if prefill_request is not None:
                    logger.info(
                        f"Dequeued prefill request: {prefill_request.request_id}"
                    )
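                    # generate() is an async generator; drive it to completion
                    # so the engine finishes the prefill and the KV transfer.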
                    async for _ in self.generate(prefill_request):
                        pass
                if self.shutdown_requested:
                    logger.info(
                        "Shutdown requested, checking if engine has any pending prefill sending requests"
                    )
                    while True:
                        if not await self.engine_client.has_unfinished_requests():
                            break
                        logger.info(
                            "Engine has pending prefill sending requests, rechecking in 1 second..."
                        )
                        await asyncio.sleep(1)
                    self.shutdown_vllm_engine()
                    break

    async def generate(self, request: RemotePrefillRequest):
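        # Prefill-only: cap generation at a single token so the engine runs the
        # prompt forward pass (and KV handoff) without doing real decode work.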
        sampling_params = request.sampling_params
        sampling_params.max_tokens = 1
        sampling_params.min_tokens = 1

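        # Mark the request as remote-decode: the computed KV blocks belong to
        # the decode engine identified by request.engine_id / block_ids.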
        remote_prefill_params = RemotePrefillParams(
            is_remote_decode=True,
            decode_block_ids=request.block_ids,
            decode_engine_id=request.engine_id,
            decode_computed_block_ids=request.computed_block_ids,
        )

        # TODO: check whether the remote metadata has changed and reload it;
        # currently it is loaded only once per engine_id.
        if request.engine_id not in self._loaded_metadata:
            remote_metadata = await self._metadata_store.get(request.engine_id)
            await self.engine_client.add_remote_nixl_metadata(remote_metadata)
            logger.info(
                f"Loaded nixl metadata from engine {request.engine_id} into "
                f"engine {self.engine_client.nixl_metadata.engine_id}"
            )
            self._loaded_metadata.add(request.engine_id)

        async for _ in self.engine_client.generate(
            request_id=request.request_id,
            prompt=TokensPrompt(prompt_token_ids=request.prompt_token_ids),
            sampling_params=sampling_params,
            remote_prefill_params=remote_prefill_params,
        ):
            yield

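    # Placeholder endpoint: the worker is driven by the NATS prefill queue
    # above rather than by direct requests to this service.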
    @dynamo_endpoint()
    async def mock(self, req: RequestType):
        yield f"mock_response: {req}"