# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

16
import logging
17
import subprocess
18
from pathlib import Path
19

20
from components.planner_service import Planner
21
22
from components.processor import Processor
from components.worker import VllmWorker
23
from fastapi import FastAPI
24
25
from pydantic import BaseModel

26
from dynamo import sdk
27
from dynamo.sdk import async_on_shutdown, depends, service
28
from dynamo.sdk.lib.config import ServiceConfig
29
from dynamo.sdk.lib.image import DYNAMO_IMAGE
30

31
32
logger = logging.getLogger(__name__)

33

34
def get_http_binary_path():
    """Find the HTTP binary path in SDK or fallback to 'http' command."""
    candidate = Path(sdk.__file__).parent / "cli/bin/http"
    # Prefer the binary bundled with the SDK; otherwise rely on PATH lookup.
    return str(candidate) if candidate.exists() else "http"


44
class FrontendConfig(BaseModel):
    """Configuration for the Frontend service including model and HTTP server settings."""

    # Model name exposed to clients; registered with `llmctl http ... chat-models`.
    served_model_name: str
    # Endpoint the registered model routes requests to (passed to `llmctl http add`).
    endpoint: str
    # TCP port the standalone HTTP server listens on.
    port: int = 8080


52
# todo this should be called ApiServer
53
@service(
    dynamo={
        "enabled": True,
        "namespace": "dynamo",
    },
    resources={"cpu": "10", "memory": "20Gi"},
    workers=1,
    image=DYNAMO_IMAGE,
    app=FastAPI(title="LLM Example"),
)
class Frontend:
    """HTTP API frontend: registers the served model with llmctl and
    launches the SDK's standalone HTTP server as a child process."""

    planner = depends(Planner)
    worker = depends(VllmWorker)
    processor = depends(Processor)

    def __init__(self):
        """Initialize Frontend service with HTTP server and model configuration."""
        config = ServiceConfig.get_instance()
        frontend_config = FrontendConfig(**config.get("Frontend", {}))

        self.frontend_config = frontend_config
        # Handle to the spawned HTTP server process (set in start_http_server).
        self.process = None

        self.setup_model()
        self.start_http_server()

    def _llmctl(self, *args):
        """Run an `llmctl http <args...>` command, tolerating failure.

        check=False is deliberate: a missing model on `remove` (or a
        transient llmctl error) must not crash service startup/shutdown.
        """
        subprocess.run(["llmctl", "http", *args], check=False)

    def setup_model(self):
        """Configure the model for HTTP service using llmctl.

        Removes any stale registration for the model name first (so a
        redeploy does not leave a duplicate/outdated entry), then adds
        the model pointing at the configured endpoint.
        """
        self._llmctl(
            "remove",
            "chat-models",
            self.frontend_config.served_model_name,
        )
        self._llmctl(
            "add",
            "chat-models",
            self.frontend_config.served_model_name,
            self.frontend_config.endpoint,
        )

    def start_http_server(self):
        """Start the HTTP server on the configured port.

        stdout/stderr are inherited from this process (Popen default),
        so server logs appear alongside service logs.
        """
        logger.info("Starting HTTP server")
        http_binary = get_http_binary_path()
        self.process = subprocess.Popen(
            [http_binary, "-p", str(self.frontend_config.port)],
        )

    @async_on_shutdown
    def cleanup(self):
        """Clean up resources before shutdown."""
        # circusd manages shutdown of http server process, we just need to
        # remove the model using the on_shutdown hook
        self._llmctl(
            "remove",
            "chat-models",
            self.frontend_config.served_model_name,
        )