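"""FastAPI app that launches a local LiteLLM proxy as a subprocess, manages its
config, and forwards all other requests to it."""
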
from fastapi import FastAPI, Request, Depends, HTTPException, status, Response
from fastapi.routing import APIRoute
from fastapi.middleware.cors import CORSMiddleware

import logging
from fastapi.responses import JSONResponse

from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.responses import StreamingResponse
import json
import time
import requests

from pydantic import BaseModel, ConfigDict
from typing import Optional, List

from utils.utils import get_verified_user, get_current_user, get_admin_user
from config import SRC_LOG_LEVELS, ENV
from constants import MESSAGES

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["LITELLM"])


from config import (
    MODEL_FILTER_ENABLED,
    MODEL_FILTER_LIST,
    DATA_DIR,
    LITELLM_PROXY_PORT,
    LITELLM_PROXY_HOST,
)

from litellm.utils import get_llm_provider

import asyncio
import subprocess
import yaml

app = FastAPI()

origins = ["*"]

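# Allow cross-origin requests from any origin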
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


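# Load the LiteLLM proxy config from disk and cache it on app state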
LITELLM_CONFIG_DIR = f"{DATA_DIR}/litellm/config.yaml"

with open(LITELLM_CONFIG_DIR, "r") as file:
    litellm_config = yaml.safe_load(file)

app.state.CONFIG = litellm_config

# Global variable to store the subprocess reference
background_process = None


async def run_background_process(command):
    global background_process
    log.info("run_background_process")

    try:
        # Log the command to be executed
        log.info(f"Executing command: {command}")
        # Execute the command and create a subprocess
        process = await asyncio.create_subprocess_exec(
            *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        background_process = process
        log.info("Subprocess started successfully.")

        # Capture STDERR for debugging purposes (note: this reads stderr to
        # EOF before stdout is streamed below)
        stderr_output = await process.stderr.read()
        stderr_text = stderr_output.decode().strip()
        if stderr_text:
            log.info(f"Subprocess STDERR: {stderr_text}")

        # Log stdout output line by line
        async for line in process.stdout:
            log.info(line.decode().strip())

        # Wait for the process to finish
        returncode = await process.wait()
        log.info(f"Subprocess exited with return code {returncode}")
    except Exception as e:
        log.error(f"Failed to start subprocess: {e}")
        raise  # Re-raise so the failure propagates to the caller


async def start_litellm_background():
    log.info("start_litellm_background")
    # Command to run in the background
    command = f"litellm --port {LITELLM_PROXY_PORT} --host {LITELLM_PROXY_HOST} --telemetry False --config {LITELLM_CONFIG_DIR}"

    await run_background_process(command)


async def shutdown_litellm_background():
    log.info("shutdown_litellm_background")
    global background_process
    if background_process:
        background_process.terminate()
        await background_process.wait()  # Ensure the process has terminated
        log.info("Subprocess terminated")
        background_process = None


@app.on_event("startup")
async def startup_event():
    log.info("startup_event")
    # TODO: Check config.yaml file and create one
    asyncio.create_task(start_litellm_background())


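# Expose the model filter settings on app state so request handlers can read them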
app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST


@app.get("/")
async def get_status():
    return {"status": True}


async def restart_litellm():
    """
    Endpoint to restart the litellm background service.
    """
    log.info("Requested restart of litellm service.")
    try:
        # Shut down the existing process if it is running
        await shutdown_litellm_background()
        log.info("litellm service shutdown complete.")

        # Restart the background service
        asyncio.create_task(start_litellm_background())
        log.info("litellm service restart complete.")

        return {
            "status": "success",
            "message": "litellm service restarted successfully.",
        }
    except Exception as e:
        log.error(f"Error restarting litellm service: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
        )


@app.get("/restart")
async def restart_litellm_handler(user=Depends(get_admin_user)):
    return await restart_litellm()


@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return app.state.CONFIG


class LiteLLMConfigForm(BaseModel):
    general_settings: Optional[dict] = None
    litellm_settings: Optional[dict] = None
    model_list: Optional[List[dict]] = None
    router_settings: Optional[dict] = None

    model_config = ConfigDict(protected_namespaces=())


@app.post("/config/update")
async def update_config(form_data: LiteLLMConfigForm, user=Depends(get_admin_user)):
    app.state.CONFIG = form_data.model_dump(exclude_none=True)

    with open(LITELLM_CONFIG_DIR, "w") as file:
        yaml.dump(app.state.CONFIG, file)

    await restart_litellm()
    return app.state.CONFIG


@app.get("/models")
@app.get("/v1/models")
async def get_models(user=Depends(get_current_user)):
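    # Wait until the litellm proxy subprocess has been started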
    while not background_process:
        await asyncio.sleep(0.1)

    url = f"http://localhost:{LITELLM_PROXY_PORT}/v1"
    r = None
    try:
        r = requests.request(method="GET", url=f"{url}/models")
        r.raise_for_status()

        data = r.json()

        if app.state.MODEL_FILTER_ENABLED:
            if user and user.role == "user":
                data["data"] = list(
                    filter(
                        lambda model: model["id"] in app.state.MODEL_FILTER_LIST,
                        data["data"],
                    )
                )

        return data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']}"
            except Exception:
                error_detail = f"External: {e}"

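        # Fallback: the proxy is unreachable, so synthesize a model list
        # from the local config instead of failing the request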
        return {
            "data": [
                {
                    "id": model["model_name"],
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "openai",
                }
                for model in app.state.CONFIG["model_list"]
            ],
            "object": "list",
        }


@app.get("/model/info")
async def get_model_list(user=Depends(get_admin_user)):
    return {"data": app.state.CONFIG["model_list"]}


class AddLiteLLMModelForm(BaseModel):
    model_name: str
    litellm_params: dict

    model_config = ConfigDict(protected_namespaces=())


@app.post("/model/new")
async def add_model_to_config(
    form_data: AddLiteLLMModelForm, user=Depends(get_admin_user)
):
    try:
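        # Validate that the model name resolves to a known provider before
        # persisting it to the config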
        get_llm_provider(model=form_data.model_name)
        app.state.CONFIG["model_list"].append(form_data.model_dump())

        with open(LITELLM_CONFIG_DIR, "w") as file:
            yaml.dump(app.state.CONFIG, file)

        await restart_litellm()

        return {"message": MESSAGES.MODEL_ADDED(form_data.model_name)}
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
        )


class DeleteLiteLLMModelForm(BaseModel):
    id: str


@app.post("/model/delete")
async def delete_model_from_config(
    form_data: DeleteLiteLLMModelForm, user=Depends(get_admin_user)
):
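    # form_data.id holds the model_name of the entry to remove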
    app.state.CONFIG["model_list"] = [
        model
        for model in app.state.CONFIG["model_list"]
        if model["model_name"] != form_data.id
    ]

    with open(LITELLM_CONFIG_DIR, "w") as file:
        yaml.dump(app.state.CONFIG, file)

    await restart_litellm()

    return {"message": MESSAGES.MODEL_DELETED(form_data.id)}


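# Catch-all route: forward any other request to the local LiteLLM proxy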
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
    body = await request.body()

    url = f"http://localhost:{LITELLM_PROXY_PORT}"

    target_url = f"{url}/{path}"

    headers = {}
    # headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None

    try:
        r = requests.request(
            method=request.method,
            url=target_url,
            data=body,
            headers=headers,
            stream=True,
        )

        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            return StreamingResponse(
                r.iter_content(chunk_size=8192),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        else:
            response_data = r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500, detail=error_detail
        )