Unverified Commit 8df0429c authored by Timothy Jaeryang Baek, committed by GitHub

Merge branch 'dev' into feat/model-config

parents f21c8626 59602eea
name: Deploy to HuggingFace Spaces

on:
  push:
    branches:
      - dev
      - main
  workflow_dispatch:

jobs:
  check-secret:
    runs-on: ubuntu-latest
    outputs:
      token-set: ${{ steps.check-key.outputs.defined }}
    steps:
      - id: check-key
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        if: "${{ env.HF_TOKEN != '' }}"
        run: echo "defined=true" >> $GITHUB_OUTPUT

  deploy:
    runs-on: ubuntu-latest
    needs: [check-secret]
    if: needs.check-secret.outputs.token-set == 'true'
    env:
      HF_TOKEN: ${{ secrets.HF_TOKEN }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Remove git history
        run: rm -rf .git
      - name: Prepend YAML front matter to README.md
        run: |
          echo "---" > temp_readme.md
          echo "title: Open WebUI" >> temp_readme.md
          echo "emoji: 🐳" >> temp_readme.md
          echo "colorFrom: purple" >> temp_readme.md
          echo "colorTo: gray" >> temp_readme.md
          echo "sdk: docker" >> temp_readme.md
          echo "app_port: 8080" >> temp_readme.md
          echo "---" >> temp_readme.md
          cat README.md >> temp_readme.md
          mv temp_readme.md README.md
      - name: Configure git
        run: |
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
      - name: Set up Git and push to Space
        run: |
          git init --initial-branch=main
          git lfs track "*.ttf"
          rm demo.gif
          git add .
          git commit -m "GitHub deploy: ${{ github.sha }}"
          git push --force https://open-webui:${HF_TOKEN}@huggingface.co/spaces/open-webui/open-webui main
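
Note: the front-matter block above is what HuggingFace Spaces reads to configure the Docker Space. A minimal Python sketch of the same prepend step (illustrative only; the workflow itself uses the shell commands above, and the path assumes the repository root as the working directory):

# Sketch: prepend the Spaces front matter to README.md, mirroring the
# workflow step above.
from pathlib import Path

front_matter = "\n".join(
    [
        "---",
        "title: Open WebUI",
        "emoji: 🐳",
        "colorFrom: purple",
        "colorTo: gray",
        "sdk: docker",
        "app_port: 8080",
        "---",
        "",
    ]
)

readme = Path("README.md")
readme.write_text(front_matter + readme.read_text())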
name: Release to PyPI

on:
  push:
    branches:
      - main # or whatever branch you want to use
      - dev

jobs:
  release:
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/open-webui
    permissions:
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - uses: actions/setup-python@v5
        with:
          python-version: 3.11
      - name: Build
        run: |
          python -m pip install --upgrade pip
          pip install build
          python -m build .
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
@@ -132,7 +132,8 @@ RUN pip3 install uv && \
     uv pip install --system -r requirements.txt --no-cache-dir && \
     python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
     python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
-    fi
+    fi; \
+    chown -R $UID:$GID /app/backend/data/
...
@@ -43,6 +43,7 @@ from utils.utils import (
 from config import (
     SRC_LOG_LEVELS,
     OLLAMA_BASE_URLS,
+    ENABLE_OLLAMA_API,
     ENABLE_MODEL_FILTER,
     MODEL_FILTER_LIST,
     UPLOAD_DIR,
@@ -68,6 +69,8 @@ app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
 app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
 app.state.MODEL_CONFIG = Models.get_all_models()
 
+app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
+
 app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
 app.state.MODELS = {}
@@ -97,6 +100,21 @@ async def get_status():
     return {"status": True}
 
 
+@app.get("/config")
+async def get_config(user=Depends(get_admin_user)):
+    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
+
+
+class OllamaConfigForm(BaseModel):
+    enable_ollama_api: Optional[bool] = None
+
+
+@app.post("/config/update")
+async def update_config(form_data: OllamaConfigForm, user=Depends(get_admin_user)):
+    app.state.config.ENABLE_OLLAMA_API = form_data.enable_ollama_api
+    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
+
+
 @app.get("/urls")
 async def get_ollama_api_urls(user=Depends(get_admin_user)):
     return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}
@@ -157,17 +175,24 @@ def merge_models_lists(model_lists):
 async def get_all_models():
     log.info("get_all_models()")
 
-    tasks = [fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS]
-    responses = await asyncio.gather(*tasks)
+    if app.state.config.ENABLE_OLLAMA_API:
+        tasks = [
+            fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS
+        ]
+        responses = await asyncio.gather(*tasks)
 
-    models = {
-        "models": merge_models_lists(
-            map(
-                lambda response: (response["models"] if response else None),
-                responses,
-            )
-        )
-    }
+        models = {
+            "models": merge_models_lists(
+                map(
+                    lambda response: response["models"] if response else None, responses
+                )
+            )
+        }
+    else:
+        models = {"models": []}
 
     for model in models["models"]:
         add_custom_info_to_model(model)
...
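
The gating pattern above (fan out to every configured base URL only when the Ollama API is enabled) can be sketched standalone; fetch_json and BASE_URLS below are illustrative stand-ins, not the app's actual helpers:

# Standalone sketch of the gated fan-out shown in the diff above.
import asyncio
import aiohttp

BASE_URLS = ["http://localhost:11434"]  # assumed Ollama endpoints
ENABLE_OLLAMA_API = True

async def fetch_json(session, url):
    try:
        async with session.get(url) as resp:
            return await resp.json()
    except aiohttp.ClientError:
        return None  # an unreachable host contributes no models

async def get_all_models():
    if not ENABLE_OLLAMA_API:
        return {"models": []}
    async with aiohttp.ClientSession() as session:
        responses = await asyncio.gather(
            *(fetch_json(session, f"{url}/api/tags") for url in BASE_URLS)
        )
    # Merge model lists, skipping hosts that returned nothing
    merged = [m for r in responses if r for m in r.get("models", [])]
    return {"models": merged}

print(asyncio.run(get_all_models()))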
@@ -316,6 +316,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
 @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
 async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     idx = 0
+    pipeline = False
 
     body = await request.body()
     # TODO: Remove below after gpt-4-vision fix from Open AI
@@ -324,7 +325,15 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
         body = body.decode("utf-8")
         body = json.loads(body)
 
-        idx = app.state.MODELS[body.get("model")]["urlIdx"]
+        model = app.state.MODELS[body.get("model")]
+        idx = model["urlIdx"]
+
+        if "pipeline" in model:
+            pipeline = model.get("pipeline")
+
+        if pipeline:
+            body["user"] = {"name": user.name, "id": user.id}
 
     # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
     # This is a workaround until OpenAI fixes the issue with this model
...
@@ -3,7 +3,7 @@ import json
 from peewee import *
 from peewee_migrate import Router
 from playhouse.db_url import connect
-from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL
+from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL, BACKEND_DIR
 import os
 import logging
@@ -30,6 +30,8 @@ else:
     DB = connect(DATABASE_URL)
 
 log.info(f"Connected to a {DB.__class__.__name__} database.")
-router = Router(DB, migrate_dir="apps/web/internal/migrations", logger=log)
+router = Router(
+    DB, migrate_dir=BACKEND_DIR / "apps" / "web" / "internal" / "migrations", logger=log
+)
 router.run()
 DB.connect(reuse_if_open=True)
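
The switch from a cwd-relative migrate_dir to one anchored at BACKEND_DIR matters once the backend runs as an installed package; a minimal sketch of the difference (illustrative, not part of the diff):

# Sketch: a cwd-relative path only resolves when the process is launched
# from backend/; a path anchored at this file's location resolves
# regardless of the working directory.
from pathlib import Path

BACKEND_DIR = Path(__file__).parent  # stable: relative to the module file
cwd_relative = Path("apps/web/internal/migrations")  # fragile: relative to os.getcwd()
anchored = BACKEND_DIR / "apps" / "web" / "internal" / "migrations"

print(cwd_relative.resolve())  # depends on where the process was started
print(anchored)                # always under the package directory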
 import os
 import sys
 import logging
+import importlib.metadata
+import pkgutil
 import chromadb
 from chromadb import Settings
 from base64 import b64encode
@@ -22,10 +24,13 @@ from constants import ERROR_MESSAGES
 # Load .env file
 ####################################
 
+BACKEND_DIR = Path(__file__).parent  # the path containing this file
+BASE_DIR = BACKEND_DIR.parent  # the path containing the backend/
+
 try:
     from dotenv import load_dotenv, find_dotenv
 
-    load_dotenv(find_dotenv("../.env"))
+    load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
 except ImportError:
     print("dotenv not installed, skipping...")
@@ -87,10 +92,12 @@ WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
 ENV = os.environ.get("ENV", "dev")
 
 try:
-    with open(f"../package.json", "r") as f:
-        PACKAGE_DATA = json.load(f)
+    PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
 except:
-    PACKAGE_DATA = {"version": "0.0.0"}
+    try:
+        PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
+    except importlib.metadata.PackageNotFoundError:
+        PACKAGE_DATA = {"version": "0.0.0"}
 
 VERSION = PACKAGE_DATA["version"]
@@ -115,10 +122,10 @@ def parse_section(section):
 try:
-    with open("../CHANGELOG.md", "r") as file:
-        changelog_content = file.read()
+    changelog_content = (BASE_DIR / "CHANGELOG.md").read_text()
 except:
-    changelog_content = ""
+    changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
 
 # Convert markdown content to HTML
 html_content = markdown.markdown(changelog_content)
@@ -164,12 +171,11 @@ WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")
 # DATA/FRONTEND BUILD DIR
 ####################################
 
-DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
-FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
+DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
+FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
 
 try:
-    with open(f"{DATA_DIR}/config.json", "r") as f:
-        CONFIG_DATA = json.load(f)
+    CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
 except:
     CONFIG_DATA = {}
@@ -279,11 +285,11 @@ JWT_EXPIRES_IN = PersistentConfig(
 # Static DIR
 ####################################
 
-STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve())
+STATIC_DIR = Path(os.getenv("STATIC_DIR", BACKEND_DIR / "static")).resolve()
 
-frontend_favicon = f"{FRONTEND_BUILD_DIR}/favicon.png"
-if os.path.exists(frontend_favicon):
-    shutil.copyfile(frontend_favicon, f"{STATIC_DIR}/favicon.png")
+frontend_favicon = FRONTEND_BUILD_DIR / "favicon.png"
+if frontend_favicon.exists():
+    shutil.copyfile(frontend_favicon, STATIC_DIR / "favicon.png")
 else:
     logging.warning(f"Frontend favicon not found at {frontend_favicon}")
@@ -378,6 +384,13 @@ if not os.path.exists(LITELLM_CONFIG_PATH):
 # OLLAMA_BASE_URL
 ####################################
 
+ENABLE_OLLAMA_API = PersistentConfig(
+    "ENABLE_OLLAMA_API",
+    "ollama.enable",
+    os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true",
+)
+
 OLLAMA_API_BASE_URL = os.environ.get(
     "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
 )
...
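
One thing worth noting about the boolean parsing in the ENABLE_OLLAMA_API default above: only the exact string "true" (case-insensitive) enables the flag. A quick sketch of the comparison, independent of PersistentConfig:

# Sketch of the env parsing used above: anything other than "true"
# (case-insensitive) disables the flag, including "1" or "yes".
import os

os.environ["ENABLE_OLLAMA_API"] = "False"
enabled = os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true"
assert enabled is False

os.environ["ENABLE_OLLAMA_API"] = "1"  # note: "1" does NOT count as true here
assert (os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true") is False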
@@ -8,6 +8,7 @@ import sys
 import logging
 import aiohttp
 import requests
+import mimetypes
 
 from fastapi import FastAPI, Request, Depends, status
 from fastapi.staticfiles import StaticFiles
@@ -437,6 +438,7 @@ app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
 app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
 
 if os.path.exists(FRONTEND_BUILD_DIR):
+    mimetypes.add_type("text/javascript", ".js")
     app.mount(
         "/",
         SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
...
import base64
import os
import random
from pathlib import Path

import typer
import uvicorn

app = typer.Typer()

KEY_FILE = Path.cwd() / ".webui_secret_key"

if (frontend_build_dir := Path(__file__).parent / "frontend").exists():
    os.environ["FRONTEND_BUILD_DIR"] = str(frontend_build_dir)


@app.command()
def serve(
    host: str = "0.0.0.0",
    port: int = 8080,
):
    if os.getenv("WEBUI_SECRET_KEY") is None:
        typer.echo(
            "Loading WEBUI_SECRET_KEY from file, not provided as an environment variable."
        )
        if not KEY_FILE.exists():
            typer.echo(f"Generating a new secret key and saving it to {KEY_FILE}")
            KEY_FILE.write_bytes(base64.b64encode(random.randbytes(12)))
        typer.echo(f"Loading WEBUI_SECRET_KEY from {KEY_FILE}")
        os.environ["WEBUI_SECRET_KEY"] = KEY_FILE.read_text()

    if os.getenv("USE_CUDA_DOCKER", "false") == "true":
        typer.echo(
            "CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
        )
        LD_LIBRARY_PATH = os.getenv("LD_LIBRARY_PATH", "").split(":")
        os.environ["LD_LIBRARY_PATH"] = ":".join(
            LD_LIBRARY_PATH
            + [
                "/usr/local/lib/python3.11/site-packages/torch/lib",
                "/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
            ]
        )

    import main  # we need to set environment variables before importing main

    uvicorn.run(main.app, host=host, port=port, forwarded_allow_ips="*")


@app.command()
def dev(
    host: str = "0.0.0.0",
    port: int = 8080,
    reload: bool = True,
):
    uvicorn.run(
        "main:app", host=host, port=port, reload=reload, forwarded_allow_ips="*"
    )


if __name__ == "__main__":
    app()
litellm_settings:
  drop_params: true
model_list:
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
    litellm_params:
      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 2047
  - model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 8192
  - model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
    litellm_params:
      model: huggingface/microsoft/Phi-3-mini-4k-instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Google: Gemma 7B 1.1'
    litellm_params:
      model: huggingface/google/gemma-1.1-7b-it
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Yi-1.5 34B Chat'
    litellm_params:
      model: huggingface/01-ai/Yi-1.5-34B-Chat
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
    litellm_params:
      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
      api_key: os.environ/HF_TOKEN
      max_tokens: 2048
@@ -30,4 +30,34 @@ if [ "$USE_CUDA_DOCKER" = "true" ]; then
     export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
 fi
 
+# Check if SPACE_ID is set, if so, configure for space
+if [ -n "$SPACE_ID" ]; then
+  echo "Configuring for HuggingFace Space deployment"
+
+  # Copy litellm_config.yaml with specified ownership
+  echo "Copying litellm_config.yaml to the desired location with specified ownership..."
+  cp -f ./space/litellm_config.yaml ./data/litellm/config.yaml
+
+  if [ -n "$ADMIN_USER_EMAIL" ] && [ -n "$ADMIN_USER_PASSWORD" ]; then
+    echo "Admin user configured, creating"
+    WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
+    webui_pid=$!
+    echo "Waiting for webui to start..."
+    while ! curl -s http://localhost:8080/health > /dev/null; do
+      sleep 1
+    done
+    echo "Creating admin user..."
+    curl \
+      -X POST "http://localhost:8080/api/v1/auths/signup" \
+      -H "accept: application/json" \
+      -H "Content-Type: application/json" \
+      -d "{ \"email\": \"${ADMIN_USER_EMAIL}\", \"password\": \"${ADMIN_USER_PASSWORD}\", \"name\": \"Admin\" }"
+    echo "Shutting down webui..."
+    kill $webui_pid
+  fi
+
+  export WEBUI_URL=${SPACE_HOST}
+fi
+
 WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*'
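
For reference, the admin-signup request issued by the startup script above, as a hedged Python sketch (same endpoint and payload as the curl call; host, port, and credentials are placeholders):

# Sketch: the signup call from the script above, expressed with requests.
import requests

resp = requests.post(
    "http://localhost:8080/api/v1/auths/signup",
    headers={"accept": "application/json"},
    json={
        "email": "admin@example.com",  # stands in for $ADMIN_USER_EMAIL
        "password": "change-me",       # stands in for $ADMIN_USER_PASSWORD
        "name": "Admin",
    },
)
print(resp.status_code, resp.json())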
# noqa: INP001
import shutil
import subprocess
from sys import stderr

from hatchling.builders.hooks.plugin.interface import BuildHookInterface


class CustomBuildHook(BuildHookInterface):
    def initialize(self, version, build_data):
        super().initialize(version, build_data)
        stderr.write(">>> Building Open Webui frontend\n")
        npm = shutil.which("npm")
        if npm is None:
            raise RuntimeError(
                "NodeJS `npm` is required for building Open Webui but it was not found"
            )
        stderr.write("### npm install\n")
        subprocess.run([npm, "install"], check=True)  # noqa: S603
        stderr.write("\n### npm run build\n")
        subprocess.run([npm, "run", "build"], check=True)  # noqa: S603
 {
 	"name": "open-webui",
-	"version": "0.1.125",
+	"version": "0.2.0.dev1",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.1.125",
+			"version": "0.2.0.dev1",
 			"dependencies": {
 				"@pyscript/core": "^0.4.32",
 				"@sveltejs/adapter-node": "^1.3.1",
...
 {
 	"name": "open-webui",
-	"version": "0.1.125",
+	"version": "0.2.0.dev1",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -13,7 +13,7 @@
 		"lint:types": "npm run check",
 		"lint:backend": "pylint backend/",
 		"format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"",
-		"format:backend": "black . --exclude \"/venv/\"",
+		"format:backend": "black . --exclude \".venv/|/venv/\"",
 		"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"",
 		"cy:open": "cypress open",
 		"test:frontend": "vitest",
...
[project]
name = "open-webui"
description = "Open WebUI (Formerly Ollama WebUI)"
authors = [
{ name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
]
license = { file = "LICENSE" }
dependencies = [
"fastapi==0.109.2",
"uvicorn[standard]==0.22.0",
"pydantic==2.7.1",
"python-multipart==0.0.9",
"Flask==3.0.3",
"Flask-Cors==4.0.0",
"python-socketio==5.11.2",
"python-jose==3.3.0",
"passlib[bcrypt]==1.7.4",
"requests==2.31.0",
"aiohttp==3.9.5",
"peewee==3.17.3",
"peewee-migrate==1.12.2",
"psycopg2-binary==2.9.9",
"PyMySQL==1.1.0",
"bcrypt==4.1.2",
"litellm[proxy]==1.35.28",
"boto3==1.34.95",
"argon2-cffi==23.1.0",
"APScheduler==3.10.4",
"google-generativeai==0.5.2",
"langchain==0.1.16",
"langchain-community==0.0.34",
"langchain-chroma==0.1.0",
"fake-useragent==1.5.1",
"chromadb==0.4.24",
"sentence-transformers==2.7.0",
"pypdf==4.2.0",
"docx2txt==0.8",
"unstructured==0.11.8",
"Markdown==3.6",
"pypandoc==1.13",
"pandas==2.2.2",
"openpyxl==3.1.2",
"pyxlsb==1.0.10",
"xlrd==2.0.1",
"validators==0.28.1",
"opencv-python-headless==4.9.0.80",
"rapidocr-onnxruntime==1.2.3",
"fpdf2==2.7.8",
"rank-bm25==0.2.2",
"faster-whisper==1.0.1",
"PyJWT[crypto]==2.8.0",
"black==24.4.2",
"langfuse==2.27.3",
"youtube-transcript-api==0.6.2",
"pytube",
]
readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1"
dynamic = ["version"]
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Topic :: Communications :: Chat",
"Topic :: Multimedia",
]
[project.scripts]
open-webui = "open_webui:app"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.rye]
managed = true
dev-dependencies = []
[tool.hatch.metadata]
allow-direct-references = true
[tool.hatch.version]
path = "package.json"
pattern = '"version":\s*"(?P<version>[^"]+)"'
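
The [tool.hatch.version] table above sources the package version from package.json via a named-group regex; a quick sketch of what that pattern extracts (file contents inlined here for illustration):

# Sketch: how hatch's version pattern above pulls the version out of package.json.
import re

package_json = '{\n  "name": "open-webui",\n  "version": "0.2.0.dev1"\n}'
match = re.search(r'"version":\s*"(?P<version>[^"]+)"', package_json)
assert match and match.group("version") == "0.2.0.dev1"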
[tool.hatch.build.hooks.custom] # keep this for reading hooks from `hatch_build.py`
[tool.hatch.build.targets.wheel]
sources = ["backend"]
exclude = [
".dockerignore",
".gitignore",
".webui_secret_key",
"dev.sh",
"requirements.txt",
"start.sh",
"start_windows.bat",
"webui.db",
"chroma.sqlite3",
]
force-include = { "CHANGELOG.md" = "open_webui/CHANGELOG.md", build = "open_webui/frontend" }
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
 import { OLLAMA_API_BASE_URL } from '$lib/constants';
 import { promptTemplate } from '$lib/utils';
 
+export const getOllamaConfig = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${OLLAMA_API_BASE_URL}/config`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			if ('detail' in err) {
+				error = err.detail;
+			} else {
+				error = 'Server connection failed';
+			}
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const updateOllamaConfig = async (token: string = '', enable_ollama_api: boolean) => {
+	let error = null;
+
+	const res = await fetch(`${OLLAMA_API_BASE_URL}/config/update`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		},
+		body: JSON.stringify({
+			enable_ollama_api: enable_ollama_api
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			if ('detail' in err) {
+				error = err.detail;
+			} else {
+				error = 'Server connection failed';
+			}
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
 export const getOllamaUrls = async (token: string = '') => {
 	let error = null;
...
@@ -123,7 +123,7 @@
 	}
 
 	onMount(async () => {
-		if (!chatId) {
+		if (!$chatId) {
 			await initNewChat();
 		} else {
 			if (!($settings.saveChatHistory ?? true)) {
@@ -442,8 +442,7 @@
 					: undefined,
 				...messages
 			]
-				.filter((message) => message)
-				.filter((message) => message.content != '')
+				.filter((message) => message?.content?.trim())
 				.map((message, idx, arr) => {
 					// Prepare the base message object
 					const baseMessage = {
@@ -703,7 +702,7 @@
 					: undefined,
 				...messages
 			]
-				.filter((message) => message)
+				.filter((message) => message?.content?.trim())
 				.map((message, idx, arr) => ({
 					role: message.role,
 					...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
...