chenpangpang / open-webui · Commit b5918914 (unverified)

Merge pull request #1704 from cheahjs/feat/litellm-config

Authored Apr 24, 2024 by Timothy Jaeryang Baek; committed via GitHub on Apr 24, 2024
Parents: 589de36a, 5245d037

Showing 2 changed files with 32 additions and 8 deletions (+32 -8):

backend/apps/litellm/main.py (+23 -8)
backend/config.py (+9 -0)
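In short: the embedded LiteLLM proxy's port and bind host become configurable through new LITELLM_PROXY_PORT and LITELLM_PROXY_HOST settings, replacing the hardcoded localhost:14365, and the background command is passed to the subprocess as an argv list instead of a whitespace-split string.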
backend/apps/litellm/main.py
import sys

from fastapi import FastAPI, Depends, HTTPException
from fastapi.routing import APIRoute
from fastapi.middleware.cors import CORSMiddleware
...
@@ -23,7 +25,13 @@ log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["LITELLM"])

-from config import MODEL_FILTER_ENABLED, MODEL_FILTER_LIST, DATA_DIR
+from config import (
+    MODEL_FILTER_ENABLED,
+    MODEL_FILTER_LIST,
+    DATA_DIR,
+    LITELLM_PROXY_PORT,
+    LITELLM_PROXY_HOST,
+)

 from litellm.utils import get_llm_provider
...
@@ -64,7 +72,7 @@ async def run_background_process(command):
     log.info(f"Executing command: {command}")
     # Execute the command and create a subprocess
     process = await asyncio.create_subprocess_exec(
-        *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        *command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
     )
     background_process = process
     log.info("Subprocess started successfully.")
...
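The switch from *command.split() to *command matters because the caller now supplies an argv list: splitting a string on whitespace mangles any argument that itself contains a space, such as a config path. A minimal standalone sketch (not part of the commit) of the list-based spawn:

# Standalone sketch: argv passed as a list survives embedded spaces,
# while str.split() would have broken the path into two arguments.
import asyncio
import subprocess

async def demo():
    command = ["echo", "--config", "./data/my litellm/config.yaml"]
    process = await asyncio.create_subprocess_exec(
        *command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, _ = await process.communicate()
    print(stdout.decode().strip())  # the path remains a single argument

asyncio.run(demo())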
@@ -90,9 +98,17 @@ async def run_background_process(command):
 async def start_litellm_background():
     log.info("start_litellm_background")
     # Command to run in the background
-    command = (
-        "litellm --port 14365 --telemetry False --config ./data/litellm/config.yaml"
-    )
+    command = [
+        "litellm",
+        "--port",
+        str(LITELLM_PROXY_PORT),
+        "--host",
+        LITELLM_PROXY_HOST,
+        "--telemetry",
+        "False",
+        "--config",
+        LITELLM_CONFIG_DIR,
+    ]

     await run_background_process(command)
...
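With the new config defaults (port 14365, host 127.0.0.1), the list expands to an argv equivalent of the old hardcoded string plus the newly added --host flag. An illustration only, assuming LITELLM_CONFIG_DIR (defined elsewhere in the app) still resolves to the pre-change ./data/litellm/config.yaml path:

# Illustration: the effective command line under default settings.
command = [
    "litellm",
    "--port", "14365",
    "--host", "127.0.0.1",
    "--telemetry", "False",
    "--config", "./data/litellm/config.yaml",  # assumed value of LITELLM_CONFIG_DIR
]
print(" ".join(command))
# litellm --port 14365 --host 127.0.0.1 --telemetry False --config ./data/litellm/config.yaml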
@@ -109,7 +125,6 @@ async def shutdown_litellm_background():
 @app.on_event("startup")
 async def startup_event():
     log.info("startup_event")
     # TODO: Check config.yaml file and create one
     asyncio.create_task(start_litellm_background())
...
@@ -186,7 +201,7 @@ async def get_models(user=Depends(get_current_user)):
     while not background_process:
         await asyncio.sleep(0.1)

-    url = "http://localhost:14365/v1"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}/v1"

     r = None
     try:
         r = requests.request(method="GET", url=f"{url}/models")
...
@@ -289,7 +304,7 @@ async def delete_model_from_config(
 async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     body = await request.body()

-    url = "http://localhost:14365"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}"

     target_url = f"{url}/{path}"
...
backend/config.py
...
@@ -499,3 +499,12 @@ IMAGES_OPENAI_API_KEY = os.getenv("IMAGES_OPENAI_API_KEY", OPENAI_API_KEY)
 AUDIO_OPENAI_API_BASE_URL = os.getenv("AUDIO_OPENAI_API_BASE_URL", OPENAI_API_BASE_URL)
 AUDIO_OPENAI_API_KEY = os.getenv("AUDIO_OPENAI_API_KEY", OPENAI_API_KEY)
+
+####################################
+# LiteLLM
+####################################
+
+LITELLM_PROXY_PORT = int(os.getenv("LITELLM_PROXY_PORT", "14365"))
+if LITELLM_PROXY_PORT < 0 or LITELLM_PROXY_PORT > 65535:
+    raise ValueError("Invalid port number for LITELLM_PROXY_PORT")
+LITELLM_PROXY_HOST = os.getenv("LITELLM_PROXY_HOST", "127.0.0.1")
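Both settings are plain environment lookups evaluated at import time, so a deployment can relocate the embedded proxy without code changes. A quick sketch mirroring the config.py logic above; the override values are hypothetical:

# Hypothetical override sketch: mirrors the validation in config.py.
import os

os.environ["LITELLM_PROXY_PORT"] = "8085"      # any free port in 0-65535
os.environ["LITELLM_PROXY_HOST"] = "0.0.0.0"   # e.g. expose inside a container

port = int(os.getenv("LITELLM_PROXY_PORT", "14365"))  # int() also raises ValueError on non-numeric input
if port < 0 or port > 65535:
    raise ValueError("Invalid port number for LITELLM_PROXY_PORT")
host = os.getenv("LITELLM_PROXY_HOST", "127.0.0.1")

print(f"http://localhost:{port}")  # URL the app's proxy routes will target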