# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import asyncio
import uuid

import uvloop
from common.base_engine import BaseVllmEngine
from common.chat_processor import ProcessMixIn
from common.parser import parse_vllm_args
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.entrypoints.openai.protocol import (
    ChatCompletionRequest,
    ChatCompletionStreamResponse,
)
from vllm.logger import logger as vllm_logger

from dynemo.runtime import DistributedRuntime, dynemo_endpoint, dynemo_worker
class VllmEngine(BaseVllmEngine, ProcessMixIn):
    """
    Request handler for the `generate` endpoint.

    Combines the vLLM engine lifecycle from ``BaseVllmEngine`` with the
    request/response conversion helpers from ``ProcessMixIn``.
    """

    def __init__(self, engine_args: AsyncEngineArgs):
        super().__init__(engine_args)

    @dynemo_endpoint(ChatCompletionRequest, ChatCompletionStreamResponse)
    async def generate(self, raw_request):
        """Stream chat-completion responses for a single raw request.

        Lazily initializes the engine client on first use, parses the raw
        request into an engine prompt plus sampling parameters, runs the
        vLLM engine, and yields streamed response chunks.

        Raises:
            RuntimeError: if lazy initialization does not produce an
                engine client.
        """
        # Lazy initialization: the first request pays the engine start-up
        # cost. Fail fast if initialization did not yield a client, before
        # spending any work on parsing the request.
        if self.engine_client is None:
            await self.initialize()
            if self.engine_client is None:
                raise RuntimeError("Engine client not initialized")

        vllm_logger.debug(f"Got raw request: {raw_request}")
        (
            request,
            conversation,
            _,  # request prompt — unused here
            engine_prompt,
            sampling_params,
        ) = await self._parse_raw_request(raw_request)

        # Unique id ties engine output and log lines to this request.
        request_id = str(uuid.uuid4())

        vllm_logger.debug(
            f"Running generate with engine_prompt: {engine_prompt}, sampling_params: {sampling_params}, request_id: {request_id}"
        )

        generator = self.engine_client.generate(
            engine_prompt, sampling_params, request_id
        )

        # _stream_response yields OpenAI-style stream chunks; forward each
        # one to the caller as it arrives.
        async for response in await self._stream_response(
            request, generator, request_id, conversation
        ):
            vllm_logger.debug(f"Generated response: {response}")
            yield response


@dynemo_worker()
async def worker(runtime: DistributedRuntime, engine_args: AsyncEngineArgs):
    """
    Serve the `generate` endpoint of a `vllm` backend component.

    A single `Component` may expose several endpoints; this worker
    registers exactly one (`generate`), backed by a `VllmEngine`.
    """
    # Register the component under the "dynemo" namespace.
    vllm_component = runtime.namespace("dynemo").component("vllm")
    await vllm_component.create_service()

    generate_endpoint = vllm_component.endpoint("generate")

    # The engine is an async context manager; serving runs until shutdown.
    async with VllmEngine(engine_args) as engine:
        await generate_endpoint.serve_endpoint(engine.generate)


if __name__ == "__main__":
    # Use uvloop as the asyncio event-loop implementation.
    uvloop.install()
    # NOTE(review): `worker` declares (runtime, engine_args) but is invoked
    # with only the engine args — the @dynemo_worker() decorator presumably
    # injects the DistributedRuntime; confirm against its implementation.
    asyncio.run(worker(parse_vllm_args()))