Unverified Commit 72354e06 authored by Timothy Jaeryang Baek, committed by GitHub

Merge pull request #2476 from open-webui/dev

0.2.0
parents 36e2a5e6 207e2503
@@ -9,9 +9,15 @@ import time
import uuid
import logging
from apps.web.models.users import UserModel, UserUpdateForm, UserRoleUpdateForm, Users
from apps.web.models.auths import Auths
from apps.web.models.chats import Chats
from apps.webui.models.users import (
UserModel,
UserUpdateForm,
UserRoleUpdateForm,
UserSettings,
Users,
)
from apps.webui.models.auths import Auths
from apps.webui.models.chats import Chats
from utils.utils import get_verified_user, get_password_hash, get_admin_user
from constants import ERROR_MESSAGES
@@ -68,6 +74,42 @@ async def update_user_role(form_data: UserRoleUpdateForm, user=Depends(get_admin
)
############################
# GetUserSettingsBySessionUser
############################
@router.get("/user/settings", response_model=Optional[UserSettings])
async def get_user_settings_by_session_user(user=Depends(get_verified_user)):
user = Users.get_user_by_id(user.id)
if user:
return user.settings
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.USER_NOT_FOUND,
)
############################
# UpdateUserSettingsBySessionUser
############################
@router.post("/user/settings/update", response_model=UserSettings)
async def update_user_settings_by_session_user(
form_data: UserSettings, user=Depends(get_verified_user)
):
user = Users.update_user_by_id(user.id, {"settings": form_data.model_dump()})
if user:
return user.settings
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.USER_NOT_FOUND,
)
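
A minimal client-side sketch of the two new settings endpoints above (assumptions: the users router is mounted at `/api/v1/users`, `TOKEN` is a valid session bearer token, and the JSON payload shape shown is merely illustrative of the `UserSettings` model):

```python
import requests

BASE = "http://localhost:8080/api/v1/users"  # assumed mount point
HEADERS = {"Authorization": "Bearer TOKEN"}  # hypothetical token

# Fetch the session user's settings (may be null if never saved).
settings = requests.get(f"{BASE}/user/settings", headers=HEADERS).json()

# Persist updated settings for the session user.
requests.post(
    f"{BASE}/user/settings/update",
    headers=HEADERS,
    json={"ui": {"theme": "dark"}},  # illustrative UserSettings payload
)
```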
############################
# GetUserById
############################
@@ -81,6 +123,8 @@ class UserResponse(BaseModel):
@router.get("/{user_id}", response_model=UserResponse)
async def get_user_by_id(user_id: str, user=Depends(get_verified_user)):
# Check if user_id is a shared chat
# If it is, get the user_id from the chat
if user_id.startswith("shared-"):
chat_id = user_id.replace("shared-", "")
chat = Chats.get_chat_by_id(chat_id)
......
@@ -8,7 +8,7 @@ from pydantic import BaseModel
from fpdf import FPDF
import markdown
from apps.web.internal.db import DB
from apps.webui.internal.db import DB
from utils.utils import get_admin_user
from utils.misc import calculate_sha256, get_gravatar_url
@@ -107,3 +107,12 @@ async def download_db(user=Depends(get_admin_user)):
media_type="application/octet-stream",
filename="webui.db",
)
@router.get("/litellm/config")
async def download_litellm_config_yaml(user=Depends(get_admin_user)):
return FileResponse(
f"{DATA_DIR}/litellm/config.yaml",
media_type="application/octet-stream",
filename="config.yaml",
)
import os
import sys
import logging
import importlib.metadata
import pkgutil
import chromadb
from chromadb import Settings
from base64 import b64encode
from bs4 import BeautifulSoup
from typing import TypeVar, Generic, Union
from pydantic import BaseModel
from typing import Optional
from pathlib import Path
import json
@@ -22,10 +26,15 @@ from constants import ERROR_MESSAGES
# Load .env file
####################################
BACKEND_DIR = Path(__file__).parent # the path containing this file
BASE_DIR = BACKEND_DIR.parent # the path containing the backend/
print(BASE_DIR)
try:
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv("../.env"))
load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
except ImportError:
print("dotenv not installed, skipping...")
@@ -51,7 +60,6 @@ log_sources = [
"CONFIG",
"DB",
"IMAGES",
"LITELLM",
"MAIN",
"MODELS",
"OLLAMA",
@@ -87,10 +95,12 @@ WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
ENV = os.environ.get("ENV", "dev")
try:
with open(f"../package.json", "r") as f:
PACKAGE_DATA = json.load(f)
PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
except:
PACKAGE_DATA = {"version": "0.0.0"}
try:
PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
except importlib.metadata.PackageNotFoundError:
PACKAGE_DATA = {"version": "0.0.0"}
VERSION = PACKAGE_DATA["version"]
@@ -115,10 +125,13 @@ def parse_section(section):
try:
with open("../CHANGELOG.md", "r") as file:
changelog_path = BASE_DIR / "CHANGELOG.md"
with open(str(changelog_path.absolute()), "r", encoding="utf8") as file:
changelog_content = file.read()
except:
changelog_content = ""
changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
# Convert markdown content to HTML
html_content = markdown.markdown(changelog_content)
@@ -155,21 +168,20 @@ CHANGELOG = changelog_json
####################################
# WEBUI_VERSION
# WEBUI_BUILD_HASH
####################################
WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")
WEBUI_BUILD_HASH = os.environ.get("WEBUI_BUILD_HASH", "dev-build")
####################################
# DATA/FRONTEND BUILD DIR
####################################
DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
try:
with open(f"{DATA_DIR}/config.json", "r") as f:
CONFIG_DATA = json.load(f)
CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
except:
CONFIG_DATA = {}
@@ -279,11 +291,11 @@ JWT_EXPIRES_IN = PersistentConfig(
# Static DIR
####################################
STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve())
STATIC_DIR = Path(os.getenv("STATIC_DIR", BACKEND_DIR / "static")).resolve()
frontend_favicon = f"{FRONTEND_BUILD_DIR}/favicon.png"
if os.path.exists(frontend_favicon):
shutil.copyfile(frontend_favicon, f"{STATIC_DIR}/favicon.png")
frontend_favicon = FRONTEND_BUILD_DIR / "favicon.png"
if frontend_favicon.exists():
shutil.copyfile(frontend_favicon, STATIC_DIR / "favicon.png")
else:
logging.warning(f"Frontend favicon not found at {frontend_favicon}")
@@ -368,16 +380,23 @@ def create_config_file(file_path):
LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
if not os.path.exists(LITELLM_CONFIG_PATH):
log.info("Config file doesn't exist. Creating...")
create_config_file(LITELLM_CONFIG_PATH)
log.info("Config file created successfully.")
# if not os.path.exists(LITELLM_CONFIG_PATH):
# log.info("Config file doesn't exist. Creating...")
# create_config_file(LITELLM_CONFIG_PATH)
# log.info("Config file created successfully.")
####################################
# OLLAMA_BASE_URL
####################################
ENABLE_OLLAMA_API = PersistentConfig(
"ENABLE_OLLAMA_API",
"ollama.enable",
os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true",
)
OLLAMA_API_BASE_URL = os.environ.get(
"OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)
@@ -549,6 +568,28 @@ WEBHOOK_URL = PersistentConfig(
ENABLE_ADMIN_EXPORT = os.environ.get("ENABLE_ADMIN_EXPORT", "True").lower() == "true"
ENABLE_COMMUNITY_SHARING = PersistentConfig(
"ENABLE_COMMUNITY_SHARING",
"ui.enable_community_sharing",
os.environ.get("ENABLE_COMMUNITY_SHARING", "True").lower() == "true",
)
class BannerModel(BaseModel):
id: str
type: str
title: Optional[str] = None
content: str
dismissible: bool
timestamp: int
WEBUI_BANNERS = PersistentConfig(
"WEBUI_BANNERS",
"ui.banners",
[BannerModel(**banner) for banner in json.loads("[]")],
)
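
For reference, a banner entry matching the `BannerModel` fields above might look like this (all values are illustrative):

```python
banner = BannerModel(
    id="maintenance-2024",          # unique identifier
    type="info",                    # banner style/category
    title="Scheduled maintenance",  # optional
    content="The service will be briefly unavailable tonight.",
    dismissible=True,
    timestamp=1716300000,           # unix seconds
)
```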
####################################
# WEBUI_SECRET_KEY
####################################
@@ -725,6 +766,75 @@ YOUTUBE_LOADER_LANGUAGE = PersistentConfig(
os.getenv("YOUTUBE_LOADER_LANGUAGE", "en").split(","),
)
ENABLE_RAG_WEB_SEARCH = PersistentConfig(
"ENABLE_RAG_WEB_SEARCH",
"rag.web.search.enable",
os.getenv("ENABLE_RAG_WEB_SEARCH", "False").lower() == "true",
)
RAG_WEB_SEARCH_ENGINE = PersistentConfig(
"RAG_WEB_SEARCH_ENGINE",
"rag.web.search.engine",
os.getenv("RAG_WEB_SEARCH_ENGINE", ""),
)
SEARXNG_QUERY_URL = PersistentConfig(
"SEARXNG_QUERY_URL",
"rag.web.search.searxng_query_url",
os.getenv("SEARXNG_QUERY_URL", ""),
)
GOOGLE_PSE_API_KEY = PersistentConfig(
"GOOGLE_PSE_API_KEY",
"rag.web.search.google_pse_api_key",
os.getenv("GOOGLE_PSE_API_KEY", ""),
)
GOOGLE_PSE_ENGINE_ID = PersistentConfig(
"GOOGLE_PSE_ENGINE_ID",
"rag.web.search.google_pse_engine_id",
os.getenv("GOOGLE_PSE_ENGINE_ID", ""),
)
BRAVE_SEARCH_API_KEY = PersistentConfig(
"BRAVE_SEARCH_API_KEY",
"rag.web.search.brave_search_api_key",
os.getenv("BRAVE_SEARCH_API_KEY", ""),
)
SERPSTACK_API_KEY = PersistentConfig(
"SERPSTACK_API_KEY",
"rag.web.search.serpstack_api_key",
os.getenv("SERPSTACK_API_KEY", ""),
)
SERPSTACK_HTTPS = PersistentConfig(
"SERPSTACK_HTTPS",
"rag.web.search.serpstack_https",
os.getenv("SERPSTACK_HTTPS", "True").lower() == "true",
)
SERPER_API_KEY = PersistentConfig(
"SERPER_API_KEY",
"rag.web.search.serper_api_key",
os.getenv("SERPER_API_KEY", ""),
)
RAG_WEB_SEARCH_RESULT_COUNT = PersistentConfig(
"RAG_WEB_SEARCH_RESULT_COUNT",
"rag.web.search.result_count",
int(os.getenv("RAG_WEB_SEARCH_RESULT_COUNT", "3")),
)
RAG_WEB_SEARCH_CONCURRENT_REQUESTS = PersistentConfig(
"RAG_WEB_SEARCH_CONCURRENT_REQUESTS",
"rag.web.search.concurrent_requests",
int(os.getenv("RAG_WEB_SEARCH_CONCURRENT_REQUESTS", "10")),
)
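
As a hedged example, enabling the new web search support against a SearXNG instance could look like the following (the variable names come from the config keys above; the engine identifier and query-URL format are assumptions, not confirmed by this diff):

```python
import os

# Set these before the config module is imported; values are illustrative.
os.environ["ENABLE_RAG_WEB_SEARCH"] = "true"
os.environ["RAG_WEB_SEARCH_ENGINE"] = "searxng"  # assumed engine id
os.environ["SEARXNG_QUERY_URL"] = "http://searxng:8080/search?q=<query>"
os.environ["RAG_WEB_SEARCH_RESULT_COUNT"] = "5"
```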
####################################
# Transcribe
####################################
@@ -813,18 +923,6 @@ AUDIO_OPENAI_API_VOICE = PersistentConfig(
os.getenv("AUDIO_OPENAI_API_VOICE", "alloy"),
)
####################################
# LiteLLM
####################################
ENABLE_LITELLM = os.environ.get("ENABLE_LITELLM", "True").lower() == "true"
LITELLM_PROXY_PORT = int(os.getenv("LITELLM_PROXY_PORT", "14365"))
if LITELLM_PROXY_PORT < 0 or LITELLM_PROXY_PORT > 65535:
raise ValueError("Invalid port number for LITELLM_PROXY_PORT")
LITELLM_PROXY_HOST = os.getenv("LITELLM_PROXY_HOST", "127.0.0.1")
####################################
# Database
......
@@ -32,6 +32,8 @@ class ERROR_MESSAGES(str, Enum):
COMMAND_TAKEN = "Uh-oh! This command is already registered. Please choose another command string."
FILE_EXISTS = "Uh-oh! This file is already registered. Please choose another file."
MODEL_ID_TAKEN = "Uh-oh! This model id is already registered. Please choose another model id string."
NAME_TAG_TAKEN = "Uh-oh! This name tag is already registered. Please choose another name tag string."
INVALID_TOKEN = (
"Your session has expired or the token is invalid. Please sign in again."
@@ -78,3 +80,7 @@
INVALID_URL = (
"Oops! The URL you provided is invalid. Please double-check and try again."
)
WEB_SEARCH_ERROR = (
lambda err="": f"{err if err else 'Oops! Something went wrong while searching the web.'}"
)
@@ -8,9 +8,11 @@ import sys
import logging
import aiohttp
import requests
import mimetypes
from fastapi import FastAPI, Request, Depends, status
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
@@ -18,27 +20,25 @@ from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response
from apps.ollama.main import app as ollama_app
from apps.openai.main import app as openai_app
from apps.litellm.main import (
app as litellm_app,
start_litellm_background,
shutdown_litellm_background,
)
from apps.ollama.main import app as ollama_app, get_all_models as get_ollama_models
from apps.openai.main import app as openai_app, get_all_models as get_openai_models
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.web.main import app as webui_app
from apps.webui.main import app as webui_app
import asyncio
from pydantic import BaseModel
from typing import List
from utils.utils import get_admin_user
from typing import List, Optional
from apps.webui.models.models import Models, ModelModel
from utils.utils import (
get_admin_user,
get_verified_user,
get_current_user,
get_http_authorization_cred,
)
from apps.rag.utils import rag_messages
from config import (
@@ -52,7 +52,8 @@ from config import (
FRONTEND_BUILD_DIR,
CACHE_DIR,
STATIC_DIR,
ENABLE_LITELLM,
ENABLE_OPENAI_API,
ENABLE_OLLAMA_API,
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
GLOBAL_LOG_LEVEL,
@@ -60,6 +61,7 @@ from config import (
WEBHOOK_URL,
ENABLE_ADMIN_EXPORT,
AppConfig,
WEBUI_BUILD_HASH,
)
from constants import ERROR_MESSAGES
@@ -89,7 +91,8 @@ print(
|_|
v{VERSION} - building the best open-source AI user interface.
v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)
@@ -97,11 +100,7 @@ https://github.com/open-webui/open-webui
@asynccontextmanager
async def lifespan(app: FastAPI):
if ENABLE_LITELLM:
asyncio.create_task(start_litellm_background())
yield
if ENABLE_LITELLM:
await shutdown_litellm_background()
app = FastAPI(
@@ -109,13 +108,20 @@ app = FastAPI(
)
app.state.config = AppConfig()
app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.config.WEBHOOK_URL = WEBHOOK_URL
origins = ["*"]
app.state.MODELS = {}
origins = ["*"]
# Custom middleware to add security headers
# class SecurityHeadersMiddleware(BaseHTTPMiddleware):
@@ -134,7 +140,8 @@ class RAGMiddleware(BaseHTTPMiddleware):
return_citations = False
if request.method == "POST" and (
"/api/chat" in request.url.path or "/chat/completions" in request.url.path
"/ollama/api/chat" in request.url.path
or "/chat/completions" in request.url.path
):
log.debug(f"request.url.path: {request.url.path}")
@@ -219,6 +226,124 @@ class RAGMiddleware(BaseHTTPMiddleware):
app.add_middleware(RAGMiddleware)
class PipelineMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request: Request, call_next):
if request.method == "POST" and (
"/ollama/api/chat" in request.url.path
or "/chat/completions" in request.url.path
):
log.debug(f"request.url.path: {request.url.path}")
# Read the original request body
body = await request.body()
# Decode body to string
body_str = body.decode("utf-8")
# Parse string to JSON
data = json.loads(body_str) if body_str else {}
model_id = data["model"]
filters = [
model
for model in app.state.MODELS.values()
if "pipeline" in model
and "type" in model["pipeline"]
and model["pipeline"]["type"] == "filter"
and (
model["pipeline"]["pipelines"] == ["*"]
or any(
model_id == target_model_id
for target_model_id in model["pipeline"]["pipelines"]
)
)
]
sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
user = None
if len(sorted_filters) > 0:
try:
user = get_current_user(
get_http_authorization_cred(
request.headers.get("Authorization")
)
)
user = {"id": user.id, "name": user.name, "role": user.role}
except:
pass
model = app.state.MODELS[model_id]
if "pipeline" in model:
sorted_filters.append(model)
for filter in sorted_filters:
r = None
try:
urlIdx = filter["urlIdx"]
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
if key != "":
headers = {"Authorization": f"Bearer {key}"}
r = requests.post(
f"{url}/{filter['id']}/filter/inlet",
headers=headers,
json={
"user": user,
"body": data,
},
)
r.raise_for_status()
data = r.json()
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
if r is not None:
try:
res = r.json()
if "detail" in res:
return JSONResponse(
status_code=r.status_code,
content=res,
)
except:
pass
else:
pass
if "pipeline" not in app.state.MODELS[model_id]:
if "chat_id" in data:
del data["chat_id"]
if "title" in data:
del data["title"]
modified_body_bytes = json.dumps(data).encode("utf-8")
# Replace the request body with the modified one
request._body = modified_body_bytes
# Set custom header to ensure content-length matches new body length
request.headers.__dict__["_list"] = [
(b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
*[
(k, v)
for k, v in request.headers.raw
if k.lower() != b"content-length"
],
]
response = await call_next(request)
return response
async def _receive(self, body: bytes):
return {"type": "http.request", "body": body, "more_body": False}
app.add_middleware(PipelineMiddleware)
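
The middleware above mutates the cached request body, so it must also rewrite `Content-Length`; otherwise the downstream app would read a truncated or padded payload. A self-contained sketch of that body-rewrite trick (Starlette/FastAPI; the middleware and field names here are illustrative, not part of the diff):

```python
import json
from fastapi import FastAPI, Request
from starlette.middleware.base import BaseHTTPMiddleware

app = FastAPI()

class RewriteBodyMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        body = await request.body()
        data = json.loads(body) if body else {}
        data.pop("chat_id", None)  # drop a client-only field, as in the diff
        new_body = json.dumps(data).encode("utf-8")
        # Replace the cached body and keep Content-Length in sync so the
        # downstream app does not read a truncated or padded payload.
        request._body = new_body
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(new_body)).encode("utf-8")),
            *[
                (k, v)
                for k, v in request.headers.raw
                if k.lower() != b"content-length"
            ],
        ]
        return await call_next(request)

app.add_middleware(RewriteBodyMiddleware)
```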
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
@@ -230,6 +355,11 @@ app.add_middleware(
@app.middleware("http")
async def check_url(request: Request, call_next):
if len(app.state.MODELS) == 0:
await get_all_models()
else:
pass
start_time = int(time.time())
response = await call_next(request)
process_time = int(time.time()) - start_time
@@ -246,9 +376,8 @@ async def update_embedding_function(request: Request, call_next):
return response
app.mount("/litellm/api", litellm_app)
app.mount("/ollama", ollama_app)
app.mount("/openai/api", openai_app)
app.mount("/openai", openai_app)
app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
@@ -259,6 +388,422 @@ app.mount("/api/v1", webui_app)
webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
async def get_all_models():
openai_models = []
ollama_models = []
if app.state.config.ENABLE_OPENAI_API:
openai_models = await get_openai_models()
openai_models = openai_models["data"]
if app.state.config.ENABLE_OLLAMA_API:
ollama_models = await get_ollama_models()
ollama_models = [
{
"id": model["model"],
"name": model["name"],
"object": "model",
"created": int(time.time()),
"owned_by": "ollama",
"ollama": model,
}
for model in ollama_models["models"]
]
models = openai_models + ollama_models
custom_models = Models.get_all_models()
for custom_model in custom_models:
if custom_model.base_model_id == None:
for model in models:
if (
custom_model.id == model["id"]
or custom_model.id == model["id"].split(":")[0]
):
model["name"] = custom_model.name
model["info"] = custom_model.model_dump()
else:
owned_by = "openai"
for model in models:
if (
custom_model.base_model_id == model["id"]
or custom_model.base_model_id == model["id"].split(":")[0]
):
owned_by = model["owned_by"]
break
models.append(
{
"id": custom_model.id,
"name": custom_model.name,
"object": "model",
"created": custom_model.created_at,
"owned_by": owned_by,
"info": custom_model.model_dump(),
"preset": True,
}
)
app.state.MODELS = {model["id"]: model for model in models}
webui_app.state.MODELS = app.state.MODELS
return models
@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
models = await get_all_models()
# Filter out filter pipelines
models = [
model
for model in models
if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
]
if app.state.config.ENABLE_MODEL_FILTER:
if user.role == "user":
models = list(
filter(
lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
models,
)
)
return {"data": models}
return {"data": models}
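
Calling the endpoint then looks like this (hypothetical host and token; the response shape `{"data": [...]}` matches the returns above):

```python
import requests

resp = requests.get(
    "http://localhost:8080/api/models",
    headers={"Authorization": "Bearer TOKEN"},  # hypothetical token
)
model_ids = [m["id"] for m in resp.json()["data"]]
```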
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
data = form_data
model_id = data["model"]
filters = [
model
for model in app.state.MODELS.values()
if "pipeline" in model
and "type" in model["pipeline"]
and model["pipeline"]["type"] == "filter"
and (
model["pipeline"]["pipelines"] == ["*"]
or any(
model_id == target_model_id
for target_model_id in model["pipeline"]["pipelines"]
)
)
]
sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
model = app.state.MODELS[model_id]
if "pipeline" in model:
sorted_filters = [model] + sorted_filters
for filter in sorted_filters:
r = None
try:
urlIdx = filter["urlIdx"]
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
if key != "":
headers = {"Authorization": f"Bearer {key}"}
r = requests.post(
f"{url}/{filter['id']}/filter/outlet",
headers=headers,
json={
"user": {"id": user.id, "name": user.name, "role": user.role},
"body": data,
},
)
r.raise_for_status()
data = r.json()
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
if r is not None:
try:
res = r.json()
if "detail" in res:
return JSONResponse(
status_code=r.status_code,
content=res,
)
except:
pass
else:
pass
return data
@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
responses = await get_openai_models(raw=True)
print(responses)
urlIdxs = [idx for idx, response in enumerate(responses) if "pipelines" in response]
return {
"data": [
{
"url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
"idx": urlIdx,
}
for urlIdx in urlIdxs
]
}
class AddPipelineForm(BaseModel):
url: str
urlIdx: int
@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
r = None
try:
urlIdx = form_data.urlIdx
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.post(
f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
class DeletePipelineForm(BaseModel):
id: str
urlIdx: int
@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
r = None
try:
urlIdx = form_data.urlIdx
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.delete(
f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
r = None
try:
urlIdx
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.get(f"{url}/pipelines", headers=headers)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
models = await get_all_models()
r = None
try:
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
models = await get_all_models()
r = None
try:
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
urlIdx: Optional[int],
pipeline_id: str,
form_data: dict,
user=Depends(get_admin_user),
):
models = await get_all_models()
r = None
try:
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
headers = {"Authorization": f"Bearer {key}"}
r = requests.post(
f"{url}/{pipeline_id}/valves/update",
headers=headers,
json={**form_data},
)
r.raise_for_status()
data = r.json()
return {**data}
except Exception as e:
# Handle connection error here
print(f"Connection error: {e}")
detail = "Pipeline not found"
if r is not None:
try:
res = r.json()
if "detail" in res:
detail = res["detail"]
except:
pass
raise HTTPException(
status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
detail=detail,
)
@app.get("/api/config")
async def get_app_config():
# Checking and Handling the Absence of 'ui' in CONFIG_DATA
@@ -272,13 +817,18 @@ async def get_app_config():
"status": True,
"name": WEBUI_NAME,
"version": VERSION,
"auth": WEBUI_AUTH,
"default_locale": default_locale,
"images": images_app.state.config.ENABLED,
"default_models": webui_app.state.config.DEFAULT_MODELS,
"default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
"trusted_header_auth": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
"admin_export_enabled": ENABLE_ADMIN_EXPORT,
"features": {
"auth": WEBUI_AUTH,
"auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
"enable_signup": webui_app.state.config.ENABLE_SIGNUP,
"enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
"enable_image_generation": images_app.state.config.ENABLED,
"enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
"enable_admin_export": ENABLE_ADMIN_EXPORT,
},
}
@@ -302,15 +852,6 @@ async def update_model_filter_config(
app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
app.state.config.MODEL_FILTER_LIST = form_data.models
ollama_app.state.config.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
ollama_app.state.config.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
openai_app.state.config.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
openai_app.state.config.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
litellm_app.state.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
litellm_app.state.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
return {
"enabled": app.state.config.ENABLE_MODEL_FILTER,
"models": app.state.config.MODEL_FILTER_LIST,
@@ -331,7 +872,6 @@ class UrlForm(BaseModel):
@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
app.state.config.WEBHOOK_URL = form_data.url
webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
return {
@@ -339,6 +879,19 @@ async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
}
@app.get("/api/community_sharing", response_model=bool)
async def get_community_sharing_status(request: Request, user=Depends(get_admin_user)):
return webui_app.state.config.ENABLE_COMMUNITY_SHARING
@app.get("/api/community_sharing/toggle", response_model=bool)
async def toggle_community_sharing(request: Request, user=Depends(get_admin_user)):
webui_app.state.config.ENABLE_COMMUNITY_SHARING = (
not webui_app.state.config.ENABLE_COMMUNITY_SHARING
)
return webui_app.state.config.ENABLE_COMMUNITY_SHARING
@app.get("/api/version")
async def get_app_config():
return {
@@ -408,6 +961,7 @@ app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
if os.path.exists(FRONTEND_BUILD_DIR):
mimetypes.add_type("text/javascript", ".js")
app.mount(
"/",
SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
......
import base64
import os
import random
from pathlib import Path
import typer
import uvicorn
app = typer.Typer()
KEY_FILE = Path.cwd() / ".webui_secret_key"
if (frontend_build_dir := Path(__file__).parent / "frontend").exists():
os.environ["FRONTEND_BUILD_DIR"] = str(frontend_build_dir)
@app.command()
def serve(
host: str = "0.0.0.0",
port: int = 8080,
):
if os.getenv("WEBUI_SECRET_KEY") is None:
typer.echo(
"Loading WEBUI_SECRET_KEY from file, not provided as an environment variable."
)
if not KEY_FILE.exists():
typer.echo(f"Generating a new secret key and saving it to {KEY_FILE}")
KEY_FILE.write_bytes(base64.b64encode(random.randbytes(12)))
typer.echo(f"Loading WEBUI_SECRET_KEY from {KEY_FILE}")
os.environ["WEBUI_SECRET_KEY"] = KEY_FILE.read_text()
if os.getenv("USE_CUDA_DOCKER", "false") == "true":
typer.echo(
"CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
)
LD_LIBRARY_PATH = os.getenv("LD_LIBRARY_PATH", "").split(":")
os.environ["LD_LIBRARY_PATH"] = ":".join(
LD_LIBRARY_PATH
+ [
"/usr/local/lib/python3.11/site-packages/torch/lib",
"/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
]
)
import main # we need set environment variables before importing main
uvicorn.run(main.app, host=host, port=port, forwarded_allow_ips="*")
@app.command()
def dev(
host: str = "0.0.0.0",
port: int = 8080,
reload: bool = True,
):
uvicorn.run(
"main:app", host=host, port=port, reload=reload, forwarded_allow_ips="*"
)
if __name__ == "__main__":
app()
fastapi==0.109.2
fastapi==0.111.0
uvicorn[standard]==0.22.0
pydantic==2.7.1
python-multipart==0.0.9
Flask==3.0.3
Flask-Cors==4.0.0
Flask-Cors==4.0.1
python-socketio==5.11.2
python-jose==3.3.0
passlib[bcrypt]==1.7.4
requests==2.31.0
requests==2.32.2
aiohttp==3.9.5
peewee==3.17.3
peewee==3.17.5
peewee-migrate==1.12.2
psycopg2-binary==2.9.9
PyMySQL==1.1.0
bcrypt==4.1.2
PyMySQL==1.1.1
bcrypt==4.1.3
litellm[proxy]==1.35.28
boto3==1.34.95
boto3==1.34.110
argon2-cffi==23.1.0
APScheduler==3.10.4
google-generativeai==0.5.2
google-generativeai==0.5.4
langchain==0.1.16
langchain-community==0.0.34
langchain-chroma==0.1.0
langchain==0.2.0
langchain-community==0.2.0
langchain-chroma==0.1.1
fake-useragent==1.5.1
chromadb==0.4.24
chromadb==0.5.0
sentence-transformers==2.7.0
pypdf==4.2.0
docx2txt==0.8
python-pptx==0.6.23
unstructured==0.11.8
unstructured==0.14.0
Markdown==3.6
pypandoc==1.13
pandas==2.2.2
@@ -46,16 +44,16 @@ xlrd==2.0.1
validators==0.28.1
opencv-python-headless==4.9.0.80
rapidocr-onnxruntime==1.2.3
rapidocr-onnxruntime==1.3.22
fpdf2==2.7.8
fpdf2==2.7.9
rank-bm25==0.2.2
faster-whisper==1.0.1
faster-whisper==1.0.2
PyJWT[crypto]==2.8.0
black==24.4.2
langfuse==2.27.3
langfuse==2.33.0
youtube-transcript-api==0.6.2
pytube
\ No newline at end of file
pytube==15.0.0
\ No newline at end of file
@@ -30,4 +30,29 @@ if [ "$USE_CUDA_DOCKER" = "true" ]; then
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
fi
# Check if SPACE_ID is set, if so, configure for space
if [ -n "$SPACE_ID" ]; then
echo "Configuring for HuggingFace Space deployment"
if [ -n "$ADMIN_USER_EMAIL" ] && [ -n "$ADMIN_USER_PASSWORD" ]; then
echo "Admin user configured, creating"
WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
webui_pid=$!
echo "Waiting for webui to start..."
while ! curl -s http://localhost:8080/health > /dev/null; do
sleep 1
done
echo "Creating admin user..."
curl \
-X POST "http://localhost:8080/api/v1/auths/signup" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{ \"email\": \"${ADMIN_USER_EMAIL}\", \"password\": \"${ADMIN_USER_PASSWORD}\", \"name\": \"Admin\" }"
echo "Shutting down webui..."
kill $webui_pid
fi
export WEBUI_URL=${SPACE_HOST}
fi
WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*'
from pathlib import Path
import hashlib
import json
import re
from datetime import timedelta
from typing import Optional
@@ -110,3 +111,76 @@ def parse_duration(duration: str) -> Optional[timedelta]:
total_duration += timedelta(weeks=number)
return total_duration
def parse_ollama_modelfile(model_text):
parameters_meta = {
"mirostat": int,
"mirostat_eta": float,
"mirostat_tau": float,
"num_ctx": int,
"repeat_last_n": int,
"repeat_penalty": float,
"temperature": float,
"seed": int,
"stop": str,
"tfs_z": float,
"num_predict": int,
"top_k": int,
"top_p": float,
}
data = {"base_model_id": None, "params": {}}
# Parse base model
base_model_match = re.search(
r"^FROM\s+(\w+)", model_text, re.MULTILINE | re.IGNORECASE
)
if base_model_match:
data["base_model_id"] = base_model_match.group(1)
# Parse template
template_match = re.search(
r'TEMPLATE\s+"""(.+?)"""', model_text, re.DOTALL | re.IGNORECASE
)
if template_match:
data["params"] = {"template": template_match.group(1).strip()}
# Parse stops
stops = re.findall(r'PARAMETER stop "(.*?)"', model_text, re.IGNORECASE)
if stops:
data["params"]["stop"] = stops
# Parse other parameters from the provided list
for param, param_type in parameters_meta.items():
param_match = re.search(rf"PARAMETER {param} (.+)", model_text, re.IGNORECASE)
if param_match:
value = param_match.group(1)
if param_type == int:
value = int(value)
elif param_type == float:
value = float(value)
data["params"][param] = value
# Parse adapter
adapter_match = re.search(r"ADAPTER (.+)", model_text, re.IGNORECASE)
if adapter_match:
data["params"]["adapter"] = adapter_match.group(1)
# Parse system description
system_desc_match = re.search(
r'SYSTEM\s+"""(.+?)"""', model_text, re.DOTALL | re.IGNORECASE
)
if system_desc_match:
data["params"]["system"] = system_desc_match.group(1).strip()
# Parse messages
messages = []
message_matches = re.findall(r"MESSAGE (\w+) (.+)", model_text, re.IGNORECASE)
for role, content in message_matches:
messages.append({"role": role, "content": content})
if messages:
data["params"]["messages"] = messages
return data
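
A quick usage sketch for `parse_ollama_modelfile` (the Modelfile contents are illustrative):

```python
modelfile = '''
FROM llama3
PARAMETER num_ctx 4096
PARAMETER temperature 0.7
SYSTEM """You are a concise assistant."""
'''

print(parse_ollama_modelfile(modelfile))
# -> {'base_model_id': 'llama3',
#     'params': {'num_ctx': 4096, 'temperature': 0.7,
#                'system': 'You are a concise assistant.'}}
```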
from apps.webui.models.models import Models, ModelModel, ModelForm, ModelResponse
def get_model_id_from_custom_model_id(id: str):
model = Models.get_model_by_id(id)
if model:
return model.id
else:
return id
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi import HTTPException, status, Depends
from apps.web.models.users import Users
from apps.webui.models.users import Users
from pydantic import BaseModel
from typing import Union, Optional
......
@@ -74,5 +74,28 @@ describe('Settings', () => {
expect(spy).to.be.callCount(2);
});
});
it('user can generate image', () => {
// Click on the model selector
cy.get('button[aria-label="Select a model"]').click();
// Select the first model
cy.get('button[aria-label="model-item"]').first().click();
// Type a message
cy.get('#chat-textarea').type('Hi, what can you do? A single sentence only please.', {
force: true
});
// Send the message
cy.get('button[type="submit"]').click();
// User's message should be visible
cy.get('.chat-user').should('exist');
// Wait for the response
cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
.find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
.should('exist');
// Click on the generate image button
cy.get('[aria-label="Generate Image"]').click();
// Wait for image to be visible
cy.get('img[data-cy="image"]', { timeout: 60_000 }).should('be.visible');
});
});
});
# This is an overlay that spins up stable-diffusion-webui for integration testing
# This is not designed to be used in production
services:
stable-diffusion-webui:
# Not built for ARM64
platform: linux/amd64
image: ghcr.io/neggles/sd-webui-docker:latest
restart: unless-stopped
environment:
CLI_ARGS: "--api --use-cpu all --precision full --no-half --skip-torch-cuda-test --ckpt /empty.pt --do-not-download-clip --disable-nan-check --disable-opt-split-attention"
PYTHONUNBUFFERED: "1"
TERM: "vt100"
SD_WEBUI_VARIANT: "default"
# Hack to get container working on Apple Silicon
# Rosetta creates a conflict ${HOME}/.cache folder
entrypoint: /bin/bash
command:
- -c
- |
export HOME=/root-home
rm -rf $${HOME}/.cache
/docker/entrypoint.sh python -u webui.py --listen --port $${WEBUI_PORT} --skip-version-check $${CLI_ARGS}
volumes:
- ./test/test_files/image_gen/sd-empty.pt:/empty.pt
open-webui:
environment:
ENABLE_IMAGE_GENERATION: "true"
AUTOMATIC1111_BASE_URL: http://stable-diffusion-webui:7860
IMAGE_SIZE: "64x64"
IMAGE_STEPS: "3"
# noqa: INP001
import os
import shutil
import subprocess
from sys import stderr
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomBuildHook(BuildHookInterface):
def initialize(self, version, build_data):
super().initialize(version, build_data)
stderr.write(">>> Building Open Webui frontend\n")
npm = shutil.which("npm")
if npm is None:
raise RuntimeError(
"NodeJS `npm` is required for building Open Webui but it was not found"
)
stderr.write("### npm install\n")
subprocess.run([npm, "install"], check=True) # noqa: S603
stderr.write("\n### npm run build\n")
os.environ["APP_BUILD_HASH"] = version
subprocess.run([npm, "run", "build"], check=True) # noqa: S603
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0",
"dependencies": {
"@pyscript/core": "^0.4.32",
"@sveltejs/adapter-node": "^1.3.1",
@@ -24,6 +24,7 @@
"katex": "^0.16.9",
"marked": "^9.1.0",
"pyodide": "^0.26.0-alpha.4",
"sortablejs": "^1.15.2",
"svelte-sonner": "^0.3.19",
"tippy.js": "^6.3.7",
"uuid": "^9.0.1"
@@ -6913,6 +6914,11 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/sortablejs": {
"version": "1.15.2",
"resolved": "https://registry.npmjs.org/sortablejs/-/sortablejs-1.15.2.tgz",
"integrity": "sha512-FJF5jgdfvoKn1MAKSdGs33bIqLi3LmsgVTliuX6iITj834F+JRQZN90Z93yql8h0K2t0RwDPBmxwlbZfDcxNZA=="
},
"node_modules/source-map-js": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz",
......
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0",
"private": true,
"scripts": {
"dev": "npm run pyodide:fetch && vite dev --host",
@@ -13,7 +13,7 @@
"lint:types": "npm run check",
"lint:backend": "pylint backend/",
"format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"",
"format:backend": "black . --exclude \"/venv/\"",
"format:backend": "black . --exclude \".venv/|/venv/\"",
"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"",
"cy:open": "cypress open",
"test:frontend": "vitest",
@@ -64,6 +64,7 @@
"katex": "^0.16.9",
"marked": "^9.1.0",
"pyodide": "^0.26.0-alpha.4",
"sortablejs": "^1.15.2",
"svelte-sonner": "^0.3.19",
"tippy.js": "^6.3.7",
"uuid": "^9.0.1"
......
[project]
name = "open-webui"
description = "Open WebUI (Formerly Ollama WebUI)"
authors = [
{ name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
]
license = { file = "LICENSE" }
dependencies = [
"fastapi==0.111.0",
"uvicorn[standard]==0.22.0",
"pydantic==2.7.1",
"python-multipart==0.0.9",
"Flask==3.0.3",
"Flask-Cors==4.0.1",
"python-socketio==5.11.2",
"python-jose==3.3.0",
"passlib[bcrypt]==1.7.4",
"requests==2.32.2",
"aiohttp==3.9.5",
"peewee==3.17.5",
"peewee-migrate==1.12.2",
"psycopg2-binary==2.9.9",
"PyMySQL==1.1.0",
"bcrypt==4.1.3",
"litellm[proxy]==1.37.20",
"boto3==1.34.110",
"argon2-cffi==23.1.0",
"APScheduler==3.10.4",
"google-generativeai==0.5.4",
"langchain==0.2.0",
"langchain-community==0.2.0",
"langchain-chroma==0.1.1",
"fake-useragent==1.5.1",
"chromadb==0.5.0",
"sentence-transformers==2.7.0",
"pypdf==4.2.0",
"docx2txt==0.8",
"unstructured==0.14.0",
"Markdown==3.6",
"pypandoc==1.13",
"pandas==2.2.2",
"openpyxl==3.1.2",
"pyxlsb==1.0.10",
"xlrd==2.0.1",
"validators==0.28.1",
"opencv-python-headless==4.9.0.80",
"rapidocr-onnxruntime==1.3.22",
"fpdf2==2.7.9",
"rank-bm25==0.2.2",
"faster-whisper==1.0.2",
"PyJWT[crypto]==2.8.0",
"black==24.4.2",
"langfuse==2.33.0",
"youtube-transcript-api==0.6.2",
"pytube==15.0.0",
]
readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1"
dynamic = ["version"]
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Topic :: Communications :: Chat",
"Topic :: Multimedia",
]
[project.scripts]
open-webui = "open_webui:app"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.rye]
managed = true
dev-dependencies = []
[tool.hatch.metadata]
allow-direct-references = true
[tool.hatch.version]
path = "package.json"
pattern = '"version":\s*"(?P<version>[^"]+)"'
[tool.hatch.build.hooks.custom] # keep this for reading hooks from `hatch_build.py`
[tool.hatch.build.targets.wheel]
sources = ["backend"]
exclude = [
".dockerignore",
".gitignore",
".webui_secret_key",
"dev.sh",
"requirements.txt",
"start.sh",
"start_windows.bat",
"webui.db",
"chroma.sqlite3",
]
force-include = { "CHANGELOG.md" = "open_webui/CHANGELOG.md", build = "open_webui/frontend" }
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.3
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.110
# via open-webui
botocore==1.34.110
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.5.0
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via fastapi
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.111.0
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-cli==0.0.4
# via fastapi
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.2
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.1
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.9
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.4
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.4
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==22.0.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via fastapi
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.2.0
# via langchain-community
# via open-webui
langchain-chroma==0.1.1
# via open-webui
langchain-community==0.2.0
# via open-webui
langchain-core==0.2.1
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.2.0
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.33.0
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.37.20
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via fastapi
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.5
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via fastapi
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.3.22
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.32.2
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.37.2
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
# via fastapi-cli
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
ujson==5.10.0
# via fastapi
unstructured==0.14.0
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via fastapi
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.3
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.110
# via open-webui
botocore==1.34.110
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.5.0
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via fastapi
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.111.0
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-cli==0.0.4
# via fastapi
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.2
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.1
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.9
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.4
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.4
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==22.0.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via fastapi
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.2.0
# via langchain-community
# via open-webui
langchain-chroma==0.1.1
# via open-webui
langchain-community==0.2.0
# via open-webui
langchain-core==0.2.1
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.2.0
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.33.0
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.37.20
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via fastapi
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.5
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via fastapi
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.3.22
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.32.2
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.37.2
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
# via fastapi-cli
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
ujson==5.10.0
# via fastapi
unstructured==0.14.0
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via fastapi
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
......@@ -325,6 +325,44 @@ export const getChatByShareId = async (token: string, share_id: string) => {
return res;
};
export const cloneChatById = async (token: string, id: string) => {
let error = null;
const res = await fetch(`${WEBUI_API_BASE_URL}/chats/${id}/clone`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.then((json) => {
return json;
})
.catch((err) => {
// Prefer the API's structured error detail when the response body provides one.
error = 'detail' in err ? err.detail : err;
console.log(err);
return null;
});
if (error) {
throw error;
}
return res;
};
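A minimal usage sketch for the new clone endpoint, assuming the SvelteKit conventions used elsewhere in this codebase (the `$lib/apis/chats` import path, `localStorage.token`, and the `goto` navigation helper are assumptions, not part of this diff):

import { goto } from '$app/navigation';
import { cloneChatById } from '$lib/apis/chats';

// Hypothetical handler: clone a chat by id, then open the copy.
const cloneHandler = async (chatId: string) => {
	try {
		// localStorage.token is assumed to hold the session JWT.
		const clonedChat = await cloneChatById(localStorage.token, chatId);
		if (clonedChat) {
			await goto(`/c/${clonedChat.id}`);
		}
	} catch (error) {
		console.error('Failed to clone chat:', error);
	}
};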
export const shareChatById = async (token: string, id: string) => {
let error = null;
......@@ -654,3 +692,35 @@ export const deleteAllChats = async (token: string) => {
return res;
};
export const archiveAllChats = async (token: string) => {
let error = null;
const res = await fetch(`${WEBUI_API_BASE_URL}/chats/archive/all`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.then((json) => {
return json;
})
.catch((err) => {
// Fall back to the raw error when the response body carries no `detail` field.
error = 'detail' in err ? err.detail : err;
console.log(err);
return null;
});
if (error) {
throw error;
}
return res;
};
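A similar sketch for the archive-all endpoint; the confirmation prompt and the post-archive refresh are illustrative assumptions, not part of this diff:

import { archiveAllChats } from '$lib/apis/chats';

// Hypothetical handler: archive every chat for the session user after a confirm.
const archiveAllHandler = async () => {
	if (!confirm('Archive all chats?')) return;
	try {
		await archiveAllChats(localStorage.token);
		// The caller would then refresh whatever chat-list state it keeps.
	} catch (error) {
		console.error('Failed to archive chats:', error);
	}
};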