# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import subprocess
from pathlib import Path

from components.processor import Processor
from components.worker import TensorRTLLMWorker
from fastapi import FastAPI
from pydantic import BaseModel

from dynamo import sdk
from dynamo.sdk import depends, service
from dynamo.sdk.lib.config import ServiceConfig
from dynamo.sdk.lib.image import DYNAMO_IMAGE

logger = logging.getLogger(__name__)


def get_http_binary_path():
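    """Return the path to the `http` binary bundled with the dynamo SDK,
    falling back to the `http` command on PATH if the bundled binary is missing."""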
    sdk_path = Path(sdk.__file__)
    binary_path = sdk_path.parent / "cli/bin/http"
    if not binary_path.exists():
        return "http"
    else:
        return str(binary_path)


class FrontendConfig(BaseModel):
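    """Frontend settings, populated from the "Frontend" section of the ServiceConfig."""
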
    served_model_name: str
    endpoint_chat: str
    endpoint_completions: str
    port: int = 8080


@service(
    dynamo={
        "namespace": "dynamo",
    },
    resources={"cpu": "10", "memory": "20Gi"},
    workers=1,
    image=DYNAMO_IMAGE,
    app=FastAPI(title="TensorRT LLM Example"),
)
# TODO: this should be called ApiServer
class Frontend:
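    """Front-end service: registers the served model's chat and completions
    endpoints with llmctl, then runs the HTTP server in the foreground."""
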
    worker = depends(TensorRTLLMWorker)
    processor = depends(Processor)

    def __init__(self):
        config = ServiceConfig.get_instance()
        frontend_config = FrontendConfig(**config.get("Frontend", {}))

        # Chat completions endpoint: clear any stale registration, then register the served model
        subprocess.run(
            [
                "llmctl",
                "http",
                "remove",
                "chat-models",
                frontend_config.served_model_name,
            ]
        )
        subprocess.run(
            [
                "llmctl",
                "http",
                "add",
                "chat-models",
                frontend_config.served_model_name,
                frontend_config.endpoint_chat,
            ]
        )

        # Completions endpoint: clear any stale registration, then register the served model
        subprocess.run(
            [
                "llmctl",
                "http",
                "remove",
                "completions",
                frontend_config.served_model_name,
            ]
        )
        subprocess.run(
            [
                "llmctl",
                "http",
                "add",
                "completions",
                frontend_config.served_model_name,
                frontend_config.endpoint_completions,
            ]
        )

        logger.info("Starting HTTP server")
        http_binary = get_http_binary_path()
        process = subprocess.Popen(
            [http_binary, "-p", str(frontend_config.port)], stdout=None, stderr=None
        )
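        # Block until the server exits; on Ctrl-C, terminate it and wait for shutdown.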
        try:
            process.wait()
        except KeyboardInterrupt:
            process.terminate()
            process.wait()