"examples/vscode:/vscode.git/clone" did not exist on "262d539a8a8f505dc72958f7ea50915a4b56dfac"
Commit 6847c2fc authored by Aryan Kothari's avatar Aryan Kothari
Browse files

Merge branch 'origin/dev' into sidebar-pagination [skip ci]

parents 06a64219 774defd1
...@@ -5,6 +5,37 @@ All notable changes to this project will be documented in this file. ...@@ -5,6 +5,37 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.3.11] - 2024-08-02
### Added
- **📊 Model Information Display**: Added visuals for model selection, including images next to model names for more intuitive navigation.
- **🗣 ElevenLabs Voice Adaptations**: Voice enhancements including support for ElevenLabs voice ID by name for personalized vocal interactions.
- **⌨️ Arrow Keys Model Selection**: Users can now use arrow keys for quicker model selection, enhancing accessibility.
- **🔍 Fuzzy Search in Model Selector**: Enhanced model selector with fuzzy search to locate models swiftly, including descriptions.
- **🕹️ ComfyUI Flux Image Generation**: Added support for the new Flux image gen model; introduces environment controls like weight precision and CLIP model options in Settings.
- **💾 Display File Size for Uploads**: Enhanced file interface now displays file size, preparing for upcoming upload restrictions.
- **🎚️ Advanced Params "Min P"**: Added 'Min P' parameter in the advanced settings for customized model precision control.
- **🔒 Enhanced OAuth**: Introduced custom redirect URI support for OAuth behind reverse proxies, enabling safer authentication processes.
- **🖥 Enhanced LaTeX Rendering**: Adjustments made to LaTeX rendering processes, now accurately detecting and presenting LaTeX inputs from text.
- **🌐 Internationalization**: Enhanced with new Romanian and updated Vietnamese and Ukrainian translations, helping broaden accessibility for international users.
### Fixed
- **🔧 Tags Handling in Document Upload**: Tags are now properly sent to the upload document handler, resolving issues with missing metadata.
- **🖥️ Sensitive Input Fields**: Corrected browser misinterpretation of secure input fields, preventing misclassification as password fields.
- **📂 Static Path Resolution in PDF Generation**: Fixed static paths that adjust dynamically to prevent issues across various environments.
### Changed
- **🎨 UI/UX Styling Enhancements**: Multiple minor styling updates for a cleaner and more intuitive user interface.
- **🚧 Refactoring Various Components**: Numerous refactoring changes across styling, file handling, and function simplifications for clarity and performance.
- **🎛️ User Valves Management**: Moved user valves from settings to direct chat controls for more user-friendly access during interactions.
### Removed
- **⚙️ Health Check Logging**: Removed verbose logging from the health checking processes to declutter logs and improve backend performance.
## [0.3.10] - 2024-07-17 ## [0.3.10] - 2024-07-17
### Fixed ### Fixed
......
...@@ -151,7 +151,7 @@ COPY --chown=$UID:$GID ./backend . ...@@ -151,7 +151,7 @@ COPY --chown=$UID:$GID ./backend .
EXPOSE 8080 EXPOSE 8080
HEALTHCHECK CMD curl --silent --fail http://localhost:8080/health | jq -e '.status == true' || exit 1 HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1
USER $UID:$GID USER $UID:$GID
......
...@@ -10,12 +10,12 @@ from fastapi import ( ...@@ -10,12 +10,12 @@ from fastapi import (
File, File,
Form, Form,
) )
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel from pydantic import BaseModel
from typing import List
import uuid import uuid
import requests import requests
import hashlib import hashlib
...@@ -31,6 +31,7 @@ from utils.utils import ( ...@@ -31,6 +31,7 @@ from utils.utils import (
) )
from utils.misc import calculate_sha256 from utils.misc import calculate_sha256
from config import ( from config import (
SRC_LOG_LEVELS, SRC_LOG_LEVELS,
CACHE_DIR, CACHE_DIR,
...@@ -252,15 +253,15 @@ async def speech(request: Request, user=Depends(get_verified_user)): ...@@ -252,15 +253,15 @@ async def speech(request: Request, user=Depends(get_verified_user)):
) )
elif app.state.config.TTS_ENGINE == "elevenlabs": elif app.state.config.TTS_ENGINE == "elevenlabs":
payload = None payload = None
try: try:
payload = json.loads(body.decode("utf-8")) payload = json.loads(body.decode("utf-8"))
except Exception as e: except Exception as e:
log.exception(e) log.exception(e)
pass raise HTTPException(status_code=400, detail="Invalid JSON payload")
url = f"https://api.elevenlabs.io/v1/text-to-speech/{payload['voice']}" voice_id = payload.get("voice", "")
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
headers = { headers = {
"Accept": "audio/mpeg", "Accept": "audio/mpeg",
...@@ -435,3 +436,69 @@ def transcribe( ...@@ -435,3 +436,69 @@ def transcribe(
status_code=status.HTTP_400_BAD_REQUEST, status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.DEFAULT(e), detail=ERROR_MESSAGES.DEFAULT(e),
) )
def get_available_models() -> List[dict]:
    """Return the TTS models offered by the configured engine.

    OpenAI exposes a fixed pair of models; ElevenLabs models are fetched
    from its public ``/v1/models`` API using the configured API key.

    Returns:
        A list of ``{"id": ...}`` / ``{"name": ..., "id": ...}`` dicts, or
        an empty list when the engine is unknown or the request fails.
    """
    if app.state.config.TTS_ENGINE == "openai":
        return [{"id": "tts-1"}, {"id": "tts-1-hd"}]
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        headers = {
            "xi-api-key": app.state.config.TTS_API_KEY,
            "Content-Type": "application/json",
        }

        try:
            response = requests.get(
                "https://api.elevenlabs.io/v1/models", headers=headers
            )
            response.raise_for_status()
            models = response.json()
            return [
                {"name": model["name"], "id": model["model_id"]} for model in models
            ]
        except requests.RequestException as e:
            # Fixed copy-paste bug: this message previously said "voices".
            log.error(f"Error fetching models: {str(e)}")
    # Fall through on unknown engine or request failure so the declared
    # List[dict] return type is honored instead of returning None.
    return []
@app.get("/models")
async def get_models(user=Depends(get_verified_user)):
    """List the TTS models available for the configured engine."""
    available = get_available_models()
    return {"models": available}
def get_available_voices() -> List[dict]:
    """Return the TTS voices offered by the configured engine.

    OpenAI voices are a fixed built-in set; ElevenLabs voices are fetched
    from its ``/v1/voices`` API using the configured API key.

    Returns:
        A list of ``{"name": ..., "id": ...}`` dicts, or an empty list
        when the engine is unknown or the request fails.
    """
    if app.state.config.TTS_ENGINE == "openai":
        return [
            {"name": "alloy", "id": "alloy"},
            {"name": "echo", "id": "echo"},
            {"name": "fable", "id": "fable"},
            {"name": "onyx", "id": "onyx"},
            {"name": "nova", "id": "nova"},
            {"name": "shimmer", "id": "shimmer"},
        ]
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        headers = {
            "xi-api-key": app.state.config.TTS_API_KEY,
            "Content-Type": "application/json",
        }

        try:
            response = requests.get(
                "https://api.elevenlabs.io/v1/voices", headers=headers
            )
            response.raise_for_status()
            voices_data = response.json()
            # Idiomatic comprehension instead of a manual append loop.
            return [
                {"name": voice["name"], "id": voice["voice_id"]}
                for voice in voices_data.get("voices", [])
            ]
        except requests.RequestException as e:
            log.error(f"Error fetching voices: {str(e)}")
    # Fall through on unknown engine or request failure so the declared
    # List[dict] return type is honored instead of returning None.
    return []
@app.get("/voices")
async def get_voices(user=Depends(get_verified_user)):
    """List the TTS voices available for the configured engine."""
    available = get_available_voices()
    return {"voices": available}
...@@ -42,6 +42,9 @@ from config import ( ...@@ -42,6 +42,9 @@ from config import (
COMFYUI_SAMPLER, COMFYUI_SAMPLER,
COMFYUI_SCHEDULER, COMFYUI_SCHEDULER,
COMFYUI_SD3, COMFYUI_SD3,
COMFYUI_FLUX,
COMFYUI_FLUX_WEIGHT_DTYPE,
COMFYUI_FLUX_FP8_CLIP,
IMAGES_OPENAI_API_BASE_URL, IMAGES_OPENAI_API_BASE_URL,
IMAGES_OPENAI_API_KEY, IMAGES_OPENAI_API_KEY,
IMAGE_GENERATION_MODEL, IMAGE_GENERATION_MODEL,
...@@ -85,6 +88,9 @@ app.state.config.COMFYUI_CFG_SCALE = COMFYUI_CFG_SCALE ...@@ -85,6 +88,9 @@ app.state.config.COMFYUI_CFG_SCALE = COMFYUI_CFG_SCALE
app.state.config.COMFYUI_SAMPLER = COMFYUI_SAMPLER app.state.config.COMFYUI_SAMPLER = COMFYUI_SAMPLER
app.state.config.COMFYUI_SCHEDULER = COMFYUI_SCHEDULER app.state.config.COMFYUI_SCHEDULER = COMFYUI_SCHEDULER
app.state.config.COMFYUI_SD3 = COMFYUI_SD3 app.state.config.COMFYUI_SD3 = COMFYUI_SD3
app.state.config.COMFYUI_FLUX = COMFYUI_FLUX
app.state.config.COMFYUI_FLUX_WEIGHT_DTYPE = COMFYUI_FLUX_WEIGHT_DTYPE
app.state.config.COMFYUI_FLUX_FP8_CLIP = COMFYUI_FLUX_FP8_CLIP
def get_automatic1111_api_auth(): def get_automatic1111_api_auth():
...@@ -497,6 +503,15 @@ async def image_generations( ...@@ -497,6 +503,15 @@ async def image_generations(
if app.state.config.COMFYUI_SD3 is not None: if app.state.config.COMFYUI_SD3 is not None:
data["sd3"] = app.state.config.COMFYUI_SD3 data["sd3"] = app.state.config.COMFYUI_SD3
if app.state.config.COMFYUI_FLUX is not None:
data["flux"] = app.state.config.COMFYUI_FLUX
if app.state.config.COMFYUI_FLUX_WEIGHT_DTYPE is not None:
data["flux_weight_dtype"] = app.state.config.COMFYUI_FLUX_WEIGHT_DTYPE
if app.state.config.COMFYUI_FLUX_FP8_CLIP is not None:
data["flux_fp8_clip"] = app.state.config.COMFYUI_FLUX_FP8_CLIP
data = ImageGenerationPayload(**data) data = ImageGenerationPayload(**data)
res = comfyui_generate_image( res = comfyui_generate_image(
......
...@@ -125,6 +125,135 @@ COMFYUI_DEFAULT_PROMPT = """ ...@@ -125,6 +125,135 @@ COMFYUI_DEFAULT_PROMPT = """
} }
""" """
FLUX_DEFAULT_PROMPT = """
{
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "Input Text Here",
"clip": [
"11",
0
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"13",
0
],
"vae": [
"10",
0
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
},
"10": {
"inputs": {
"vae_name": "ae.sft"
},
"class_type": "VAELoader"
},
"11": {
"inputs": {
"clip_name1": "clip_l.safetensors",
"clip_name2": "t5xxl_fp16.safetensors",
"type": "flux"
},
"class_type": "DualCLIPLoader"
},
"12": {
"inputs": {
"unet_name": "flux1-dev.sft",
"weight_dtype": "default"
},
"class_type": "UNETLoader"
},
"13": {
"inputs": {
"noise": [
"25",
0
],
"guider": [
"22",
0
],
"sampler": [
"16",
0
],
"sigmas": [
"17",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "SamplerCustomAdvanced"
},
"16": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "KSamplerSelect"
},
"17": {
"inputs": {
"scheduler": "simple",
"steps": 20,
"denoise": 1,
"model": [
"12",
0
]
},
"class_type": "BasicScheduler"
},
"22": {
"inputs": {
"model": [
"12",
0
],
"conditioning": [
"6",
0
]
},
"class_type": "BasicGuider"
},
"25": {
"inputs": {
"noise_seed": 778937779713005
},
"class_type": "RandomNoise"
}
}
"""
def queue_prompt(prompt, client_id, base_url): def queue_prompt(prompt, client_id, base_url):
log.info("queue_prompt") log.info("queue_prompt")
...@@ -194,6 +323,9 @@ class ImageGenerationPayload(BaseModel): ...@@ -194,6 +323,9 @@ class ImageGenerationPayload(BaseModel):
sampler: Optional[str] = None sampler: Optional[str] = None
scheduler: Optional[str] = None scheduler: Optional[str] = None
sd3: Optional[bool] = None sd3: Optional[bool] = None
flux: Optional[bool] = None
flux_weight_dtype: Optional[str] = None
flux_fp8_clip: Optional[bool] = None
def comfyui_generate_image( def comfyui_generate_image(
...@@ -215,21 +347,46 @@ def comfyui_generate_image( ...@@ -215,21 +347,46 @@ def comfyui_generate_image(
if payload.sd3: if payload.sd3:
comfyui_prompt["5"]["class_type"] = "EmptySD3LatentImage" comfyui_prompt["5"]["class_type"] = "EmptySD3LatentImage"
if payload.steps:
comfyui_prompt["3"]["inputs"]["steps"] = payload.steps
comfyui_prompt["4"]["inputs"]["ckpt_name"] = model comfyui_prompt["4"]["inputs"]["ckpt_name"] = model
comfyui_prompt["7"]["inputs"]["text"] = payload.negative_prompt
comfyui_prompt["3"]["inputs"]["seed"] = (
payload.seed if payload.seed else random.randint(0, 18446744073709551614)
)
# as Flux uses a completely different workflow, we must treat it specially
if payload.flux:
comfyui_prompt = json.loads(FLUX_DEFAULT_PROMPT)
comfyui_prompt["12"]["inputs"]["unet_name"] = model
comfyui_prompt["25"]["inputs"]["noise_seed"] = (
payload.seed if payload.seed else random.randint(0, 18446744073709551614)
)
if payload.sampler:
comfyui_prompt["16"]["inputs"]["sampler_name"] = payload.sampler
if payload.steps:
comfyui_prompt["17"]["inputs"]["steps"] = payload.steps
if payload.scheduler:
comfyui_prompt["17"]["inputs"]["scheduler"] = payload.scheduler
if payload.flux_weight_dtype:
comfyui_prompt["12"]["inputs"]["weight_dtype"] = payload.flux_weight_dtype
if payload.flux_fp8_clip:
comfyui_prompt["11"]["inputs"][
"clip_name2"
] = "t5xxl_fp8_e4m3fn.safetensors"
comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n
comfyui_prompt["5"]["inputs"]["width"] = payload.width comfyui_prompt["5"]["inputs"]["width"] = payload.width
comfyui_prompt["5"]["inputs"]["height"] = payload.height comfyui_prompt["5"]["inputs"]["height"] = payload.height
# set the text prompt for our positive CLIPTextEncode # set the text prompt for our positive CLIPTextEncode
comfyui_prompt["6"]["inputs"]["text"] = payload.prompt comfyui_prompt["6"]["inputs"]["text"] = payload.prompt
comfyui_prompt["7"]["inputs"]["text"] = payload.negative_prompt
if payload.steps:
comfyui_prompt["3"]["inputs"]["steps"] = payload.steps
comfyui_prompt["3"]["inputs"]["seed"] = (
payload.seed if payload.seed else random.randint(0, 18446744073709551614)
)
try: try:
ws = websocket.WebSocket() ws = websocket.WebSocket()
......
...@@ -857,6 +857,12 @@ async def generate_chat_completion( ...@@ -857,6 +857,12 @@ async def generate_chat_completion(
): ):
payload["options"]["top_p"] = model_info.params.get("top_p", None) payload["options"]["top_p"] = model_info.params.get("top_p", None)
if (
model_info.params.get("min_p", None)
and payload["options"].get("min_p") is None
):
payload["options"]["min_p"] = model_info.params.get("min_p", None)
if ( if (
model_info.params.get("use_mmap", None) model_info.params.get("use_mmap", None)
and payload["options"].get("use_mmap") is None and payload["options"].get("use_mmap") is None
......
...@@ -52,7 +52,6 @@ async def user_join(sid, data): ...@@ -52,7 +52,6 @@ async def user_join(sid, data):
user = Users.get_user_by_id(data["id"]) user = Users.get_user_by_id(data["id"])
if user: if user:
SESSION_POOL[sid] = user.id SESSION_POOL[sid] = user.id
if user.id in USER_POOL: if user.id in USER_POOL:
USER_POOL[user.id].append(sid) USER_POOL[user.id].append(sid)
...@@ -80,7 +79,6 @@ def get_models_in_use(): ...@@ -80,7 +79,6 @@ def get_models_in_use():
@sio.on("usage") @sio.on("usage")
async def usage(sid, data): async def usage(sid, data):
model_id = data["model"] model_id = data["model"]
# Cancel previous callback if there is one # Cancel previous callback if there is one
...@@ -139,7 +137,7 @@ async def disconnect(sid): ...@@ -139,7 +137,7 @@ async def disconnect(sid):
print(f"Unknown session ID {sid} disconnected") print(f"Unknown session ID {sid} disconnected")
async def get_event_emitter(request_info): def get_event_emitter(request_info):
async def __event_emitter__(event_data): async def __event_emitter__(event_data):
await sio.emit( await sio.emit(
"chat-events", "chat-events",
...@@ -154,7 +152,7 @@ async def get_event_emitter(request_info): ...@@ -154,7 +152,7 @@ async def get_event_emitter(request_info):
return __event_emitter__ return __event_emitter__
async def get_event_call(request_info): def get_event_call(request_info):
async def __event_call__(event_data): async def __event_call__(event_data):
response = await sio.call( response = await sio.call(
"chat-events", "chat-events",
......
This diff is collapsed.
import json
import logging import logging
from typing import Optional from typing import Optional, List
from pydantic import BaseModel, ConfigDict from pydantic import BaseModel, ConfigDict
from sqlalchemy import String, Column, BigInteger, Text from sqlalchemy import Column, BigInteger, Text
from apps.webui.internal.db import Base, JSONField, get_db from apps.webui.internal.db import Base, JSONField, get_db
from typing import List, Union, Optional
from config import SRC_LOG_LEVELS from config import SRC_LOG_LEVELS
import time import time
...@@ -113,7 +111,6 @@ class ModelForm(BaseModel): ...@@ -113,7 +111,6 @@ class ModelForm(BaseModel):
class ModelsTable: class ModelsTable:
def insert_new_model( def insert_new_model(
self, form_data: ModelForm, user_id: str self, form_data: ModelForm, user_id: str
) -> Optional[ModelModel]: ) -> Optional[ModelModel]:
...@@ -126,9 +123,7 @@ class ModelsTable: ...@@ -126,9 +123,7 @@ class ModelsTable:
} }
) )
try: try:
with get_db() as db: with get_db() as db:
result = Model(**model.model_dump()) result = Model(**model.model_dump())
db.add(result) db.add(result)
db.commit() db.commit()
...@@ -144,13 +139,11 @@ class ModelsTable: ...@@ -144,13 +139,11 @@ class ModelsTable:
def get_all_models(self) -> List[ModelModel]: def get_all_models(self) -> List[ModelModel]:
with get_db() as db: with get_db() as db:
return [ModelModel.model_validate(model) for model in db.query(Model).all()] return [ModelModel.model_validate(model) for model in db.query(Model).all()]
def get_model_by_id(self, id: str) -> Optional[ModelModel]: def get_model_by_id(self, id: str) -> Optional[ModelModel]:
try: try:
with get_db() as db: with get_db() as db:
model = db.get(Model, id) model = db.get(Model, id)
return ModelModel.model_validate(model) return ModelModel.model_validate(model)
except: except:
...@@ -178,7 +171,6 @@ class ModelsTable: ...@@ -178,7 +171,6 @@ class ModelsTable:
def delete_model_by_id(self, id: str) -> bool: def delete_model_by_id(self, id: str) -> bool:
try: try:
with get_db() as db: with get_db() as db:
db.query(Model).filter_by(id=id).delete() db.query(Model).filter_by(id=id).delete()
db.commit() db.commit()
......
from pathlib import Path
import site
from fastapi import APIRouter, UploadFile, File, Response from fastapi import APIRouter, UploadFile, File, Response
from fastapi import Depends, HTTPException, status from fastapi import Depends, HTTPException, status
from starlette.responses import StreamingResponse, FileResponse from starlette.responses import StreamingResponse, FileResponse
...@@ -64,8 +67,18 @@ async def download_chat_as_pdf( ...@@ -64,8 +67,18 @@ async def download_chat_as_pdf(
pdf = FPDF() pdf = FPDF()
pdf.add_page() pdf.add_page()
STATIC_DIR = "./static" # When running in docker, workdir is /app/backend, so fonts is in /app/backend/static/fonts
FONTS_DIR = f"{STATIC_DIR}/fonts" FONTS_DIR = Path("./static/fonts")
# Non Docker Installation
# When running using `pip install` the static directory is in the site packages.
if not FONTS_DIR.exists():
FONTS_DIR = Path(site.getsitepackages()[0]) / "static/fonts"
# When running using `pip install -e .` the static directory is in the site packages.
# This path only works if `open-webui serve` is run from the root of this project.
if not FONTS_DIR.exists():
FONTS_DIR = Path("./backend/static/fonts")
pdf.add_font("NotoSans", "", f"{FONTS_DIR}/NotoSans-Regular.ttf") pdf.add_font("NotoSans", "", f"{FONTS_DIR}/NotoSans-Regular.ttf")
pdf.add_font("NotoSans", "b", f"{FONTS_DIR}/NotoSans-Bold.ttf") pdf.add_font("NotoSans", "b", f"{FONTS_DIR}/NotoSans-Bold.ttf")
......
...@@ -349,6 +349,12 @@ GOOGLE_OAUTH_SCOPE = PersistentConfig( ...@@ -349,6 +349,12 @@ GOOGLE_OAUTH_SCOPE = PersistentConfig(
os.environ.get("GOOGLE_OAUTH_SCOPE", "openid email profile"), os.environ.get("GOOGLE_OAUTH_SCOPE", "openid email profile"),
) )
# Optional override for the Google OAuth callback URL (useful behind a
# reverse proxy); when empty, the redirect URI is generated automatically
# at login time from the incoming request.
GOOGLE_REDIRECT_URI = PersistentConfig(
    "GOOGLE_REDIRECT_URI",
    "oauth.google.redirect_uri",
    os.environ.get("GOOGLE_REDIRECT_URI", ""),
)
MICROSOFT_CLIENT_ID = PersistentConfig( MICROSOFT_CLIENT_ID = PersistentConfig(
"MICROSOFT_CLIENT_ID", "MICROSOFT_CLIENT_ID",
"oauth.microsoft.client_id", "oauth.microsoft.client_id",
...@@ -373,6 +379,12 @@ MICROSOFT_OAUTH_SCOPE = PersistentConfig( ...@@ -373,6 +379,12 @@ MICROSOFT_OAUTH_SCOPE = PersistentConfig(
os.environ.get("MICROSOFT_OAUTH_SCOPE", "openid email profile"), os.environ.get("MICROSOFT_OAUTH_SCOPE", "openid email profile"),
) )
# Optional override for the Microsoft OAuth callback URL (useful behind a
# reverse proxy); when empty, the redirect URI is generated automatically
# at login time from the incoming request.
MICROSOFT_REDIRECT_URI = PersistentConfig(
    "MICROSOFT_REDIRECT_URI",
    "oauth.microsoft.redirect_uri",
    os.environ.get("MICROSOFT_REDIRECT_URI", ""),
)
OAUTH_CLIENT_ID = PersistentConfig( OAUTH_CLIENT_ID = PersistentConfig(
"OAUTH_CLIENT_ID", "OAUTH_CLIENT_ID",
"oauth.oidc.client_id", "oauth.oidc.client_id",
...@@ -391,6 +403,12 @@ OPENID_PROVIDER_URL = PersistentConfig( ...@@ -391,6 +403,12 @@ OPENID_PROVIDER_URL = PersistentConfig(
os.environ.get("OPENID_PROVIDER_URL", ""), os.environ.get("OPENID_PROVIDER_URL", ""),
) )
# Optional override for the generic OIDC callback URL (useful behind a
# reverse proxy); when empty, the redirect URI is generated automatically
# at login time from the incoming request.
OPENID_REDIRECT_URI = PersistentConfig(
    "OPENID_REDIRECT_URI",
    "oauth.oidc.redirect_uri",
    os.environ.get("OPENID_REDIRECT_URI", ""),
)
OAUTH_SCOPES = PersistentConfig( OAUTH_SCOPES = PersistentConfig(
"OAUTH_SCOPES", "OAUTH_SCOPES",
"oauth.oidc.scopes", "oauth.oidc.scopes",
...@@ -424,6 +442,7 @@ def load_oauth_providers(): ...@@ -424,6 +442,7 @@ def load_oauth_providers():
"client_secret": GOOGLE_CLIENT_SECRET.value, "client_secret": GOOGLE_CLIENT_SECRET.value,
"server_metadata_url": "https://accounts.google.com/.well-known/openid-configuration", "server_metadata_url": "https://accounts.google.com/.well-known/openid-configuration",
"scope": GOOGLE_OAUTH_SCOPE.value, "scope": GOOGLE_OAUTH_SCOPE.value,
"redirect_uri": GOOGLE_REDIRECT_URI.value,
} }
if ( if (
...@@ -436,6 +455,7 @@ def load_oauth_providers(): ...@@ -436,6 +455,7 @@ def load_oauth_providers():
"client_secret": MICROSOFT_CLIENT_SECRET.value, "client_secret": MICROSOFT_CLIENT_SECRET.value,
"server_metadata_url": f"https://login.microsoftonline.com/{MICROSOFT_CLIENT_TENANT_ID.value}/v2.0/.well-known/openid-configuration", "server_metadata_url": f"https://login.microsoftonline.com/{MICROSOFT_CLIENT_TENANT_ID.value}/v2.0/.well-known/openid-configuration",
"scope": MICROSOFT_OAUTH_SCOPE.value, "scope": MICROSOFT_OAUTH_SCOPE.value,
"redirect_uri": MICROSOFT_REDIRECT_URI.value,
} }
if ( if (
...@@ -449,6 +469,7 @@ def load_oauth_providers(): ...@@ -449,6 +469,7 @@ def load_oauth_providers():
"server_metadata_url": OPENID_PROVIDER_URL.value, "server_metadata_url": OPENID_PROVIDER_URL.value,
"scope": OAUTH_SCOPES.value, "scope": OAUTH_SCOPES.value,
"name": OAUTH_PROVIDER_NAME.value, "name": OAUTH_PROVIDER_NAME.value,
"redirect_uri": OPENID_REDIRECT_URI.value,
} }
...@@ -1281,6 +1302,24 @@ COMFYUI_SD3 = PersistentConfig( ...@@ -1281,6 +1302,24 @@ COMFYUI_SD3 = PersistentConfig(
os.environ.get("COMFYUI_SD3", "").lower() == "true", os.environ.get("COMFYUI_SD3", "").lower() == "true",
) )
# Enables the dedicated Flux workflow for ComfyUI image generation
# (parsed as bool: env var must be exactly "true", case-insensitive).
COMFYUI_FLUX = PersistentConfig(
    "COMFYUI_FLUX",
    "image_generation.comfyui.flux",
    os.environ.get("COMFYUI_FLUX", "").lower() == "true",
)

# Weight dtype forwarded to the Flux UNETLoader node; empty string keeps
# the workflow's default.
COMFYUI_FLUX_WEIGHT_DTYPE = PersistentConfig(
    "COMFYUI_FLUX_WEIGHT_DTYPE",
    "image_generation.comfyui.flux_weight_dtype",
    os.getenv("COMFYUI_FLUX_WEIGHT_DTYPE", ""),
)

# NOTE(review): stored as a raw string, unlike COMFYUI_FLUX above which is
# parsed to bool — downstream only does a truthiness check, so any
# non-empty value (even "false") enables fp8 CLIP. Confirm whether this
# should also be `.lower() == "true"`.
COMFYUI_FLUX_FP8_CLIP = PersistentConfig(
    "COMFYUI_FLUX_FP8_CLIP",
    "image_generation.comfyui.flux_fp8_clip",
    os.getenv("COMFYUI_FLUX_FP8_CLIP", ""),
)
IMAGES_OPENAI_API_BASE_URL = PersistentConfig( IMAGES_OPENAI_API_BASE_URL = PersistentConfig(
"IMAGES_OPENAI_API_BASE_URL", "IMAGES_OPENAI_API_BASE_URL",
"image_generation.openai.api_base_url", "image_generation.openai.api_base_url",
......
...@@ -13,8 +13,6 @@ import aiohttp ...@@ -13,8 +13,6 @@ import aiohttp
import requests import requests
import mimetypes import mimetypes
import shutil import shutil
import os
import uuid
import inspect import inspect
from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
...@@ -29,7 +27,7 @@ from starlette.middleware.sessions import SessionMiddleware ...@@ -29,7 +27,7 @@ from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import StreamingResponse, Response, RedirectResponse from starlette.responses import StreamingResponse, Response, RedirectResponse
from apps.socket.main import sio, app as socket_app, get_event_emitter, get_event_call from apps.socket.main import app as socket_app, get_event_emitter, get_event_call
from apps.ollama.main import ( from apps.ollama.main import (
app as ollama_app, app as ollama_app,
get_all_models as get_ollama_models, get_all_models as get_ollama_models,
...@@ -619,32 +617,15 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware): ...@@ -619,32 +617,15 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
content={"detail": str(e)}, content={"detail": str(e)},
) )
# Extract valves from the request body metadata = {
valves = None "chat_id": body.pop("chat_id", None),
if "valves" in body: "message_id": body.pop("id", None),
valves = body["valves"] "session_id": body.pop("session_id", None),
del body["valves"] "valves": body.pop("valves", None),
}
# Extract session_id, chat_id and message_id from the request body
session_id = None __event_emitter__ = get_event_emitter(metadata)
if "session_id" in body: __event_call__ = get_event_call(metadata)
session_id = body["session_id"]
del body["session_id"]
chat_id = None
if "chat_id" in body:
chat_id = body["chat_id"]
del body["chat_id"]
message_id = None
if "id" in body:
message_id = body["id"]
del body["id"]
__event_emitter__ = await get_event_emitter(
{"chat_id": chat_id, "message_id": message_id, "session_id": session_id}
)
__event_call__ = await get_event_call(
{"chat_id": chat_id, "message_id": message_id, "session_id": session_id}
)
# Initialize data_items to store additional data to be sent to the client # Initialize data_items to store additional data to be sent to the client
data_items = [] data_items = []
...@@ -709,13 +690,7 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware): ...@@ -709,13 +690,7 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
if len(citations) > 0: if len(citations) > 0:
data_items.append({"citations": citations}) data_items.append({"citations": citations})
body["metadata"] = { body["metadata"] = metadata
"session_id": session_id,
"chat_id": chat_id,
"message_id": message_id,
"valves": valves,
}
modified_body_bytes = json.dumps(body).encode("utf-8") modified_body_bytes = json.dumps(body).encode("utf-8")
# Replace the request body with the modified one # Replace the request body with the modified one
request._body = modified_body_bytes request._body = modified_body_bytes
...@@ -1191,13 +1166,13 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)): ...@@ -1191,13 +1166,13 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
status_code=r.status_code, status_code=r.status_code,
content=res, content=res,
) )
except: except Exception:
pass pass
else: else:
pass pass
__event_emitter__ = await get_event_emitter( __event_emitter__ = get_event_emitter(
{ {
"chat_id": data["chat_id"], "chat_id": data["chat_id"],
"message_id": data["id"], "message_id": data["id"],
...@@ -1205,7 +1180,7 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)): ...@@ -1205,7 +1180,7 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
} }
) )
__event_call__ = await get_event_call( __event_call__ = get_event_call(
{ {
"chat_id": data["chat_id"], "chat_id": data["chat_id"],
"message_id": data["id"], "message_id": data["id"],
...@@ -1310,9 +1285,7 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)): ...@@ -1310,9 +1285,7 @@ async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
@app.post("/api/chat/actions/{action_id}") @app.post("/api/chat/actions/{action_id}")
async def chat_completed( async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
action_id: str, form_data: dict, user=Depends(get_verified_user)
):
if "." in action_id: if "." in action_id:
action_id, sub_action_id = action_id.split(".") action_id, sub_action_id = action_id.split(".")
else: else:
...@@ -1334,14 +1307,14 @@ async def chat_completed( ...@@ -1334,14 +1307,14 @@ async def chat_completed(
) )
model = app.state.MODELS[model_id] model = app.state.MODELS[model_id]
__event_emitter__ = await get_event_emitter( __event_emitter__ = get_event_emitter(
{ {
"chat_id": data["chat_id"], "chat_id": data["chat_id"],
"message_id": data["id"], "message_id": data["id"],
"session_id": data["session_id"], "session_id": data["session_id"],
} }
) )
__event_call__ = await get_event_call( __event_call__ = get_event_call(
{ {
"chat_id": data["chat_id"], "chat_id": data["chat_id"],
"message_id": data["id"], "message_id": data["id"],
...@@ -1770,7 +1743,6 @@ class AddPipelineForm(BaseModel): ...@@ -1770,7 +1743,6 @@ class AddPipelineForm(BaseModel):
@app.post("/api/pipelines/add") @app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)): async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
r = None r = None
try: try:
urlIdx = form_data.urlIdx urlIdx = form_data.urlIdx
...@@ -1813,7 +1785,6 @@ class DeletePipelineForm(BaseModel): ...@@ -1813,7 +1785,6 @@ class DeletePipelineForm(BaseModel):
@app.delete("/api/pipelines/delete") @app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)): async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
r = None r = None
try: try:
urlIdx = form_data.urlIdx urlIdx = form_data.urlIdx
...@@ -1891,7 +1862,6 @@ async def get_pipeline_valves( ...@@ -1891,7 +1862,6 @@ async def get_pipeline_valves(
models = await get_all_models() models = await get_all_models()
r = None r = None
try: try:
url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx] url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
key = openai_app.state.config.OPENAI_API_KEYS[urlIdx] key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
...@@ -2143,6 +2113,7 @@ for provider_name, provider_config in OAUTH_PROVIDERS.items(): ...@@ -2143,6 +2113,7 @@ for provider_name, provider_config in OAUTH_PROVIDERS.items():
client_kwargs={ client_kwargs={
"scope": provider_config["scope"], "scope": provider_config["scope"],
}, },
redirect_uri=provider_config["redirect_uri"],
) )
# SessionMiddleware is used by authlib for oauth # SessionMiddleware is used by authlib for oauth
...@@ -2160,7 +2131,10 @@ if len(OAUTH_PROVIDERS) > 0: ...@@ -2160,7 +2131,10 @@ if len(OAUTH_PROVIDERS) > 0:
async def oauth_login(provider: str, request: Request): async def oauth_login(provider: str, request: Request):
if provider not in OAUTH_PROVIDERS: if provider not in OAUTH_PROVIDERS:
raise HTTPException(404) raise HTTPException(404)
redirect_uri = request.url_for("oauth_callback", provider=provider) # If the provider has a custom redirect URL, use that, otherwise automatically generate one
redirect_uri = OAUTH_PROVIDERS[provider].get("redirect_uri") or request.url_for(
"oauth_callback", provider=provider
)
return await oauth.create_client(provider).authorize_redirect(request, redirect_uri) return await oauth.create_client(provider).authorize_redirect(request, redirect_uri)
......
...@@ -12,6 +12,7 @@ passlib[bcrypt]==1.7.4 ...@@ -12,6 +12,7 @@ passlib[bcrypt]==1.7.4
requests==2.32.3 requests==2.32.3
aiohttp==3.9.5 aiohttp==3.9.5
sqlalchemy==2.0.31 sqlalchemy==2.0.31
alembic==1.13.2 alembic==1.13.2
peewee==3.17.6 peewee==3.17.6
...@@ -19,7 +20,7 @@ peewee-migrate==1.12.2 ...@@ -19,7 +20,7 @@ peewee-migrate==1.12.2
psycopg2-binary==2.9.9 psycopg2-binary==2.9.9
PyMySQL==1.1.1 PyMySQL==1.1.1
bcrypt==4.1.3 bcrypt==4.1.3
SQLAlchemy
pymongo pymongo
redis redis
boto3==1.34.110 boto3==1.34.110
......
from pathlib import Path from pathlib import Path
import hashlib import hashlib
import json
import re import re
from datetime import timedelta from datetime import timedelta
from typing import Optional, List, Tuple from typing import Optional, List, Tuple
...@@ -8,37 +7,39 @@ import uuid ...@@ -8,37 +7,39 @@ import uuid
import time import time
def get_last_user_message_item(messages: List[dict]) -> str: def get_last_user_message_item(messages: List[dict]) -> Optional[dict]:
for message in reversed(messages): for message in reversed(messages):
if message["role"] == "user": if message["role"] == "user":
return message return message
return None return None
def get_last_user_message(messages: List[dict]) -> str: def get_content_from_message(message: dict) -> Optional[str]:
message = get_last_user_message_item(messages) if isinstance(message["content"], list):
for item in message["content"]:
if message is not None: if item["type"] == "text":
if isinstance(message["content"], list): return item["text"]
for item in message["content"]: else:
if item["type"] == "text":
return item["text"]
return message["content"] return message["content"]
return None return None
def get_last_assistant_message(messages: List[dict]) -> str: def get_last_user_message(messages: List[dict]) -> Optional[str]:
message = get_last_user_message_item(messages)
if message is None:
return None
return get_content_from_message(message)
def get_last_assistant_message(messages: List[dict]) -> Optional[str]:
for message in reversed(messages): for message in reversed(messages):
if message["role"] == "assistant": if message["role"] == "assistant":
if isinstance(message["content"], list): return get_content_from_message(message)
for item in message["content"]:
if item["type"] == "text":
return item["text"]
return message["content"]
return None return None
def get_system_message(messages: List[dict]) -> dict: def get_system_message(messages: List[dict]) -> Optional[dict]:
for message in messages: for message in messages:
if message["role"] == "system": if message["role"] == "system":
return message return message
...@@ -49,7 +50,7 @@ def remove_system_message(messages: List[dict]) -> List[dict]: ...@@ -49,7 +50,7 @@ def remove_system_message(messages: List[dict]) -> List[dict]:
return [message for message in messages if message["role"] != "system"] return [message for message in messages if message["role"] != "system"]
def pop_system_message(messages: List[dict]) -> Tuple[dict, List[dict]]: def pop_system_message(messages: List[dict]) -> Tuple[Optional[dict], List[dict]]:
return get_system_message(messages), remove_system_message(messages) return get_system_message(messages), remove_system_message(messages)
...@@ -87,23 +88,29 @@ def add_or_update_system_message(content: str, messages: List[dict]): ...@@ -87,23 +88,29 @@ def add_or_update_system_message(content: str, messages: List[dict]):
return messages return messages
def stream_message_template(model: str, message: str): def openai_chat_message_template(model: str):
return { return {
"id": f"{model}-{str(uuid.uuid4())}", "id": f"{model}-{str(uuid.uuid4())}",
"object": "chat.completion.chunk",
"created": int(time.time()), "created": int(time.time()),
"model": model, "model": model,
"choices": [ "choices": [{"index": 0, "logprobs": None, "finish_reason": None}],
{
"index": 0,
"delta": {"content": message},
"logprobs": None,
"finish_reason": None,
}
],
} }
def openai_chat_chunk_message_template(model: str, message: str):
template = openai_chat_message_template(model)
template["object"] = "chat.completion.chunk"
template["choices"][0]["delta"] = {"content": message}
return template
def openai_chat_completion_message_template(model: str, message: str):
template = openai_chat_message_template(model)
template["object"] = "chat.completion"
template["choices"][0]["message"] = {"content": message, "role": "assistant"}
template["choices"][0]["finish_reason"] = "stop"
def get_gravatar_url(email): def get_gravatar_url(email):
# Trim leading and trailing whitespace from # Trim leading and trailing whitespace from
# an email address and force all characters # an email address and force all characters
...@@ -174,7 +181,7 @@ def extract_folders_after_data_docs(path): ...@@ -174,7 +181,7 @@ def extract_folders_after_data_docs(path):
tags = [] tags = []
folders = parts[index_docs:-1] folders = parts[index_docs:-1]
for idx, part in enumerate(folders): for idx, _ in enumerate(folders):
tags.append("/".join(folders[: idx + 1])) tags.append("/".join(folders[: idx + 1]))
return tags return tags
...@@ -270,11 +277,11 @@ def parse_ollama_modelfile(model_text): ...@@ -270,11 +277,11 @@ def parse_ollama_modelfile(model_text):
value = param_match.group(1) value = param_match.group(1)
try: try:
if param_type == int: if param_type is int:
value = int(value) value = int(value)
elif param_type == float: elif param_type is float:
value = float(value) value = float(value)
elif param_type == bool: elif param_type is bool:
value = value.lower() == "true" value = value.lower() == "true"
except Exception as e: except Exception as e:
print(e) print(e)
......
{ {
"name": "open-webui", "name": "open-webui",
"version": "0.3.10", "version": "0.3.11",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "open-webui", "name": "open-webui",
"version": "0.3.10", "version": "0.3.11",
"dependencies": { "dependencies": {
"@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-javascript": "^6.2.2",
"@codemirror/lang-python": "^6.1.6", "@codemirror/lang-python": "^6.1.6",
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
"dayjs": "^1.11.10", "dayjs": "^1.11.10",
"eventsource-parser": "^1.1.2", "eventsource-parser": "^1.1.2",
"file-saver": "^2.0.5", "file-saver": "^2.0.5",
"fuse.js": "^7.0.0",
"highlight.js": "^11.9.0", "highlight.js": "^11.9.0",
"i18next": "^23.10.0", "i18next": "^23.10.0",
"i18next-browser-languagedetector": "^7.2.0", "i18next-browser-languagedetector": "^7.2.0",
...@@ -4820,6 +4821,14 @@ ...@@ -4820,6 +4821,14 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/fuse.js": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-7.0.0.tgz",
"integrity": "sha512-14F4hBIxqKvD4Zz/XjDc3y94mNZN6pRv3U13Udo0lNLCWRBUsrMv2xwcF/y/Z5sV6+FQW+/ow68cHpm4sunt8Q==",
"engines": {
"node": ">=10"
}
},
"node_modules/gc-hook": { "node_modules/gc-hook": {
"version": "0.3.1", "version": "0.3.1",
"resolved": "https://registry.npmjs.org/gc-hook/-/gc-hook-0.3.1.tgz", "resolved": "https://registry.npmjs.org/gc-hook/-/gc-hook-0.3.1.tgz",
......
{ {
"name": "open-webui", "name": "open-webui",
"version": "0.3.10", "version": "0.3.11",
"private": true, "private": true,
"scripts": { "scripts": {
"dev": "npm run pyodide:fetch && vite dev --host", "dev": "npm run pyodide:fetch && vite dev --host",
...@@ -60,6 +60,7 @@ ...@@ -60,6 +60,7 @@
"dayjs": "^1.11.10", "dayjs": "^1.11.10",
"eventsource-parser": "^1.1.2", "eventsource-parser": "^1.1.2",
"file-saver": "^2.0.5", "file-saver": "^2.0.5",
"fuse.js": "^7.0.0",
"highlight.js": "^11.9.0", "highlight.js": "^11.9.0",
"i18next": "^23.10.0", "i18next": "^23.10.0",
"i18next-browser-languagedetector": "^7.2.0", "i18next-browser-languagedetector": "^7.2.0",
......
...@@ -8,50 +8,61 @@ license = { file = "LICENSE" } ...@@ -8,50 +8,61 @@ license = { file = "LICENSE" }
dependencies = [ dependencies = [
"fastapi==0.111.0", "fastapi==0.111.0",
"uvicorn[standard]==0.22.0", "uvicorn[standard]==0.22.0",
"pydantic==2.7.1", "pydantic==2.8.2",
"python-multipart==0.0.9", "python-multipart==0.0.9",
"Flask==3.0.3", "Flask==3.0.3",
"Flask-Cors==4.0.1", "Flask-Cors==4.0.1",
"python-socketio==5.11.2", "python-socketio==5.11.3",
"python-jose==3.3.0", "python-jose==3.3.0",
"passlib[bcrypt]==1.7.4", "passlib[bcrypt]==1.7.4",
"requests==2.32.2", "requests==2.32.3",
"aiohttp==3.9.5", "aiohttp==3.9.5",
"peewee==3.17.5",
"sqlalchemy==2.0.31",
"alembic==1.13.2",
"peewee==3.17.6",
"peewee-migrate==1.12.2", "peewee-migrate==1.12.2",
"psycopg2-binary==2.9.9", "psycopg2-binary==2.9.9",
"PyMySQL==1.1.1", "PyMySQL==1.1.1",
"bcrypt==4.1.3", "bcrypt==4.1.3",
"pymongo",
"redis",
"boto3==1.34.110", "boto3==1.34.110",
"argon2-cffi==23.1.0", "argon2-cffi==23.1.0",
"APScheduler==3.10.4", "APScheduler==3.10.4",
"openai",
"anthropic",
"google-generativeai==0.5.4", "google-generativeai==0.5.4",
"tiktoken",
"langchain==0.2.0", "langchain==0.2.11",
"langchain-community==0.2.9", "langchain-community==0.2.10",
"langchain-chroma==0.1.1", "langchain-chroma==0.1.2",
"fake-useragent==1.5.1", "fake-useragent==1.5.1",
"chromadb==0.5.0", "chromadb==0.5.4",
"sentence-transformers==2.7.0", "sentence-transformers==3.0.1",
"pypdf==4.2.0", "pypdf==4.2.0",
"docx2txt==0.8", "docx2txt==0.8",
"unstructured==0.14.0", "python-pptx==0.6.23",
"unstructured==0.15.0",
"Markdown==3.6", "Markdown==3.6",
"pypandoc==1.13", "pypandoc==1.13",
"pandas==2.2.2", "pandas==2.2.2",
"openpyxl==3.1.2", "openpyxl==3.1.5",
"pyxlsb==1.0.10", "pyxlsb==1.0.10",
"xlrd==2.0.1", "xlrd==2.0.1",
"validators==0.28.1", "validators==0.28.1",
"psutil",
"opencv-python-headless==4.9.0.80", "opencv-python-headless==4.10.0.84",
"rapidocr-onnxruntime==1.3.22", "rapidocr-onnxruntime==1.3.24",
"fpdf2==2.7.9", "fpdf2==2.7.9",
"rank-bm25==0.2.2", "rank-bm25==0.2.2",
...@@ -62,13 +73,17 @@ dependencies = [ ...@@ -62,13 +73,17 @@ dependencies = [
"authlib==1.3.1", "authlib==1.3.1",
"black==24.4.2", "black==24.4.2",
"langfuse==2.33.0", "langfuse==2.39.2",
"youtube-transcript-api==0.6.2", "youtube-transcript-api==0.6.2",
"pytube==15.0.0", "pytube==15.0.0",
"extract_msg", "extract_msg",
"pydub", "pydub",
"duckduckgo-search~=6.1.5" "duckduckgo-search~=6.2.1",
"docker~=7.1.0",
"pytest~=8.2.2",
"pytest-docker~=3.1.1"
] ]
readme = "README.md" readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1" requires-python = ">= 3.11, < 3.12.0a1"
......
...@@ -16,10 +16,17 @@ aiohttp==3.9.5 ...@@ -16,10 +16,17 @@ aiohttp==3.9.5
# via open-webui # via open-webui
aiosignal==1.3.1 aiosignal==1.3.1
# via aiohttp # via aiohttp
alembic==1.13.2
# via open-webui
annotated-types==0.6.0 annotated-types==0.6.0
# via pydantic # via pydantic
anyio==4.3.0 anthropic==0.32.0
# via open-webui
anyio==4.4.0
# via anthropic
# via httpx # via httpx
# via langfuse
# via openai
# via starlette # via starlette
# via watchfiles # via watchfiles
apscheduler==3.10.4 apscheduler==3.10.4
...@@ -32,6 +39,7 @@ asgiref==3.8.1 ...@@ -32,6 +39,7 @@ asgiref==3.8.1
# via opentelemetry-instrumentation-asgi # via opentelemetry-instrumentation-asgi
attrs==23.2.0 attrs==23.2.0
# via aiohttp # via aiohttp
# via pytest-docker
authlib==1.3.1 authlib==1.3.1
# via open-webui # via open-webui
av==11.0.0 av==11.0.0
...@@ -76,9 +84,9 @@ chardet==5.2.0 ...@@ -76,9 +84,9 @@ chardet==5.2.0
charset-normalizer==3.3.2 charset-normalizer==3.3.2
# via requests # via requests
# via unstructured-client # via unstructured-client
chroma-hnswlib==0.7.3 chroma-hnswlib==0.7.5
# via chromadb # via chromadb
chromadb==0.5.0 chromadb==0.5.4
# via langchain-chroma # via langchain-chroma
# via open-webui # via open-webui
click==8.1.7 click==8.1.7
...@@ -102,7 +110,6 @@ cryptography==42.0.7 ...@@ -102,7 +110,6 @@ cryptography==42.0.7
ctranslate2==4.2.1 ctranslate2==4.2.1
# via faster-whisper # via faster-whisper
dataclasses-json==0.6.6 dataclasses-json==0.6.6
# via langchain
# via langchain-community # via langchain-community
# via unstructured # via unstructured
# via unstructured-client # via unstructured-client
...@@ -113,11 +120,17 @@ defusedxml==0.7.1 ...@@ -113,11 +120,17 @@ defusedxml==0.7.1
deprecated==1.2.14 deprecated==1.2.14
# via opentelemetry-api # via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc # via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via anthropic
# via openai
dnspython==2.6.1 dnspython==2.6.1
# via email-validator # via email-validator
# via pymongo
docker==7.1.0
# via open-webui
docx2txt==0.8 docx2txt==0.8
# via open-webui # via open-webui
duckduckgo-search==6.1.5 duckduckgo-search==6.2.6
# via open-webui # via open-webui
easygui==0.98.3 easygui==0.98.3
# via oletools # via oletools
...@@ -208,8 +221,11 @@ httplib2==0.22.0 ...@@ -208,8 +221,11 @@ httplib2==0.22.0
httptools==0.6.1 httptools==0.6.1
# via uvicorn # via uvicorn
httpx==0.27.0 httpx==0.27.0
# via anthropic
# via chromadb
# via fastapi # via fastapi
# via langfuse # via langfuse
# via openai
huggingface-hub==0.23.0 huggingface-hub==0.23.0
# via faster-whisper # via faster-whisper
# via sentence-transformers # via sentence-transformers
...@@ -229,12 +245,16 @@ importlib-metadata==7.0.0 ...@@ -229,12 +245,16 @@ importlib-metadata==7.0.0
# via opentelemetry-api # via opentelemetry-api
importlib-resources==6.4.0 importlib-resources==6.4.0
# via chromadb # via chromadb
iniconfig==2.0.0
# via pytest
itsdangerous==2.2.0 itsdangerous==2.2.0
# via flask # via flask
jinja2==3.1.4 jinja2==3.1.4
# via fastapi # via fastapi
# via flask # via flask
# via torch # via torch
jiter==0.5.0
# via anthropic
jmespath==1.0.1 jmespath==1.0.1
# via boto3 # via boto3
# via botocore # via botocore
...@@ -249,14 +269,14 @@ jsonpointer==2.4 ...@@ -249,14 +269,14 @@ jsonpointer==2.4
# via jsonpatch # via jsonpatch
kubernetes==29.0.0 kubernetes==29.0.0
# via chromadb # via chromadb
langchain==0.2.0 langchain==0.2.11
# via langchain-community # via langchain-community
# via open-webui # via open-webui
langchain-chroma==0.1.1 langchain-chroma==0.1.2
# via open-webui # via open-webui
langchain-community==0.2.0 langchain-community==0.2.10
# via open-webui # via open-webui
langchain-core==0.2.1 langchain-core==0.2.28
# via langchain # via langchain
# via langchain-chroma # via langchain-chroma
# via langchain-community # via langchain-community
...@@ -265,22 +285,26 @@ langchain-text-splitters==0.2.0 ...@@ -265,22 +285,26 @@ langchain-text-splitters==0.2.0
# via langchain # via langchain
langdetect==1.0.9 langdetect==1.0.9
# via unstructured # via unstructured
langfuse==2.33.0 langfuse==2.39.2
# via open-webui # via open-webui
langsmith==0.1.57 langsmith==0.1.96
# via langchain # via langchain
# via langchain-community # via langchain-community
# via langchain-core # via langchain-core
lark==1.1.8 lark==1.1.8
# via rtfde # via rtfde
lxml==5.2.2 lxml==5.2.2
# via python-pptx
# via unstructured # via unstructured
mako==1.3.5
# via alembic
markdown==3.6 markdown==3.6
# via open-webui # via open-webui
markdown-it-py==3.0.0 markdown-it-py==3.0.0
# via rich # via rich
markupsafe==2.1.5 markupsafe==2.1.5
# via jinja2 # via jinja2
# via mako
# via werkzeug # via werkzeug
marshmallow==3.21.2 marshmallow==3.21.2
# via dataclasses-json # via dataclasses-json
...@@ -339,11 +363,13 @@ onnxruntime==1.17.3 ...@@ -339,11 +363,13 @@ onnxruntime==1.17.3
# via chromadb # via chromadb
# via faster-whisper # via faster-whisper
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
openai==1.38.0
# via open-webui
opencv-python==4.9.0.80 opencv-python==4.9.0.80
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80 opencv-python-headless==4.10.0.84
# via open-webui # via open-webui
openpyxl==3.1.2 openpyxl==3.1.5
# via open-webui # via open-webui
opentelemetry-api==1.24.0 opentelemetry-api==1.24.0
# via chromadb # via chromadb
...@@ -380,7 +406,6 @@ ordered-set==4.1.0 ...@@ -380,7 +406,6 @@ ordered-set==4.1.0
# via deepdiff # via deepdiff
orjson==3.10.3 orjson==3.10.3
# via chromadb # via chromadb
# via duckduckgo-search
# via fastapi # via fastapi
# via langsmith # via langsmith
overrides==7.7.0 overrides==7.7.0
...@@ -393,6 +418,7 @@ packaging==23.2 ...@@ -393,6 +418,7 @@ packaging==23.2
# via langfuse # via langfuse
# via marshmallow # via marshmallow
# via onnxruntime # via onnxruntime
# via pytest
# via transformers # via transformers
# via unstructured-client # via unstructured-client
pandas==2.2.2 pandas==2.2.2
...@@ -403,19 +429,24 @@ pathspec==0.12.1 ...@@ -403,19 +429,24 @@ pathspec==0.12.1
# via black # via black
pcodedmp==1.2.6 pcodedmp==1.2.6
# via oletools # via oletools
peewee==3.17.5 peewee==3.17.6
# via open-webui # via open-webui
# via peewee-migrate # via peewee-migrate
peewee-migrate==1.12.2 peewee-migrate==1.12.2
# via open-webui # via open-webui
pillow==10.3.0 pillow==10.3.0
# via fpdf2 # via fpdf2
# via python-pptx
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
# via sentence-transformers # via sentence-transformers
platformdirs==4.2.1 platformdirs==4.2.1
# via black # via black
pluggy==1.5.0
# via pytest
posthog==3.5.0 posthog==3.5.0
# via chromadb # via chromadb
primp==0.5.5
# via duckduckgo-search
proto-plus==1.23.0 proto-plus==1.23.0
# via google-ai-generativelanguage # via google-ai-generativelanguage
# via google-api-core # via google-api-core
...@@ -428,6 +459,9 @@ protobuf==4.25.3 ...@@ -428,6 +459,9 @@ protobuf==4.25.3
# via onnxruntime # via onnxruntime
# via opentelemetry-proto # via opentelemetry-proto
# via proto-plus # via proto-plus
psutil==6.0.0
# via open-webui
# via unstructured
psycopg2-binary==2.9.9 psycopg2-binary==2.9.9
# via open-webui # via open-webui
pyasn1==0.6.0 pyasn1==0.6.0
...@@ -440,7 +474,8 @@ pyclipper==1.3.0.post5 ...@@ -440,7 +474,8 @@ pyclipper==1.3.0.post5
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
pycparser==2.22 pycparser==2.22
# via cffi # via cffi
pydantic==2.7.1 pydantic==2.8.2
# via anthropic
# via chromadb # via chromadb
# via fastapi # via fastapi
# via google-generativeai # via google-generativeai
...@@ -449,7 +484,8 @@ pydantic==2.7.1 ...@@ -449,7 +484,8 @@ pydantic==2.7.1
# via langfuse # via langfuse
# via langsmith # via langsmith
# via open-webui # via open-webui
pydantic-core==2.18.2 # via openai
pydantic-core==2.20.1
# via pydantic # via pydantic
pydub==0.25.1 pydub==0.25.1
# via open-webui # via open-webui
...@@ -457,7 +493,9 @@ pygments==2.18.0 ...@@ -457,7 +493,9 @@ pygments==2.18.0
# via rich # via rich
pyjwt==2.8.0 pyjwt==2.8.0
# via open-webui # via open-webui
pymysql==1.1.0 pymongo==4.8.0
# via open-webui
pymysql==1.1.1
# via open-webui # via open-webui
pypandoc==1.13 pypandoc==1.13
# via open-webui # via open-webui
...@@ -471,8 +509,11 @@ pypika==0.48.9 ...@@ -471,8 +509,11 @@ pypika==0.48.9
# via chromadb # via chromadb
pyproject-hooks==1.1.0 pyproject-hooks==1.1.0
# via build # via build
pyreqwest-impersonate==0.4.7 pytest==8.2.2
# via duckduckgo-search # via open-webui
# via pytest-docker
pytest-docker==3.1.1
# via open-webui
python-dateutil==2.9.0.post0 python-dateutil==2.9.0.post0
# via botocore # via botocore
# via kubernetes # via kubernetes
...@@ -492,7 +533,9 @@ python-magic==0.4.27 ...@@ -492,7 +533,9 @@ python-magic==0.4.27
python-multipart==0.0.9 python-multipart==0.0.9
# via fastapi # via fastapi
# via open-webui # via open-webui
python-socketio==5.11.2 python-pptx==0.6.23
# via open-webui
python-socketio==5.11.3
# via open-webui # via open-webui
pytube==15.0.0 pytube==15.0.0
# via open-webui # via open-webui
...@@ -516,15 +559,18 @@ rank-bm25==0.2.2 ...@@ -516,15 +559,18 @@ rank-bm25==0.2.2
# via open-webui # via open-webui
rapidfuzz==3.9.0 rapidfuzz==3.9.0
# via unstructured # via unstructured
rapidocr-onnxruntime==1.3.22 rapidocr-onnxruntime==1.3.24
# via open-webui # via open-webui
red-black-tree-mod==1.20 red-black-tree-mod==1.20
# via extract-msg # via extract-msg
redis==5.0.8
# via open-webui
regex==2024.5.10 regex==2024.5.10
# via nltk # via nltk
# via tiktoken
# via transformers # via transformers
requests==2.32.2 requests==2.32.3
# via chromadb # via docker
# via google-api-core # via google-api-core
# via huggingface-hub # via huggingface-hub
# via kubernetes # via kubernetes
...@@ -534,6 +580,7 @@ requests==2.32.2 ...@@ -534,6 +580,7 @@ requests==2.32.2
# via open-webui # via open-webui
# via posthog # via posthog
# via requests-oauthlib # via requests-oauthlib
# via tiktoken
# via transformers # via transformers
# via unstructured # via unstructured
# via unstructured-client # via unstructured-client
...@@ -556,12 +603,12 @@ scikit-learn==1.4.2 ...@@ -556,12 +603,12 @@ scikit-learn==1.4.2
scipy==1.13.0 scipy==1.13.0
# via scikit-learn # via scikit-learn
# via sentence-transformers # via sentence-transformers
sentence-transformers==2.7.0 sentence-transformers==3.0.1
# via open-webui # via open-webui
setuptools==69.5.1 setuptools==69.5.1
# via ctranslate2 # via ctranslate2
# via opentelemetry-instrumentation # via opentelemetry-instrumentation
shapely==2.0.4 shapely==2.0.5
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
shellingham==1.5.4 shellingham==1.5.4
# via typer # via typer
...@@ -577,13 +624,17 @@ six==1.16.0 ...@@ -577,13 +624,17 @@ six==1.16.0
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
# via unstructured-client # via unstructured-client
sniffio==1.3.1 sniffio==1.3.1
# via anthropic
# via anyio # via anyio
# via httpx # via httpx
# via openai
soupsieve==2.5 soupsieve==2.5
# via beautifulsoup4 # via beautifulsoup4
sqlalchemy==2.0.30 sqlalchemy==2.0.31
# via alembic
# via langchain # via langchain
# via langchain-community # via langchain-community
# via open-webui
starlette==0.37.2 starlette==0.37.2
# via fastapi # via fastapi
sympy==1.12 sympy==1.12
...@@ -598,7 +649,10 @@ tenacity==8.3.0 ...@@ -598,7 +649,10 @@ tenacity==8.3.0
# via langchain-core # via langchain-core
threadpoolctl==3.5.0 threadpoolctl==3.5.0
# via scikit-learn # via scikit-learn
tiktoken==0.7.0
# via open-webui
tokenizers==0.15.2 tokenizers==0.15.2
# via anthropic
# via chromadb # via chromadb
# via faster-whisper # via faster-whisper
# via transformers # via transformers
...@@ -609,18 +663,24 @@ tqdm==4.66.4 ...@@ -609,18 +663,24 @@ tqdm==4.66.4
# via google-generativeai # via google-generativeai
# via huggingface-hub # via huggingface-hub
# via nltk # via nltk
# via openai
# via sentence-transformers # via sentence-transformers
# via transformers # via transformers
# via unstructured
transformers==4.39.3 transformers==4.39.3
# via sentence-transformers # via sentence-transformers
typer==0.12.3 typer==0.12.3
# via chromadb # via chromadb
# via fastapi-cli # via fastapi-cli
typing-extensions==4.11.0 typing-extensions==4.11.0
# via alembic
# via anthropic
# via chromadb # via chromadb
# via fastapi # via fastapi
# via google-generativeai # via google-generativeai
# via huggingface-hub # via huggingface-hub
# via langchain-core
# via openai
# via opentelemetry-sdk # via opentelemetry-sdk
# via pydantic # via pydantic
# via pydantic-core # via pydantic-core
...@@ -640,7 +700,7 @@ tzlocal==5.2 ...@@ -640,7 +700,7 @@ tzlocal==5.2
# via extract-msg # via extract-msg
ujson==5.10.0 ujson==5.10.0
# via fastapi # via fastapi
unstructured==0.14.0 unstructured==0.15.0
# via open-webui # via open-webui
unstructured-client==0.22.0 unstructured-client==0.22.0
# via unstructured # via unstructured
...@@ -648,6 +708,7 @@ uritemplate==4.1.1 ...@@ -648,6 +708,7 @@ uritemplate==4.1.1
# via google-api-python-client # via google-api-python-client
urllib3==2.2.1 urllib3==2.2.1
# via botocore # via botocore
# via docker
# via kubernetes # via kubernetes
# via requests # via requests
# via unstructured-client # via unstructured-client
...@@ -676,6 +737,8 @@ wsproto==1.2.0 ...@@ -676,6 +737,8 @@ wsproto==1.2.0
# via simple-websocket # via simple-websocket
xlrd==2.0.1 xlrd==2.0.1
# via open-webui # via open-webui
xlsxwriter==3.2.0
# via python-pptx
yarl==1.9.4 yarl==1.9.4
# via aiohttp # via aiohttp
youtube-transcript-api==0.6.2 youtube-transcript-api==0.6.2
......
...@@ -16,10 +16,17 @@ aiohttp==3.9.5 ...@@ -16,10 +16,17 @@ aiohttp==3.9.5
# via open-webui # via open-webui
aiosignal==1.3.1 aiosignal==1.3.1
# via aiohttp # via aiohttp
alembic==1.13.2
# via open-webui
annotated-types==0.6.0 annotated-types==0.6.0
# via pydantic # via pydantic
anyio==4.3.0 anthropic==0.32.0
# via open-webui
anyio==4.4.0
# via anthropic
# via httpx # via httpx
# via langfuse
# via openai
# via starlette # via starlette
# via watchfiles # via watchfiles
apscheduler==3.10.4 apscheduler==3.10.4
...@@ -32,6 +39,7 @@ asgiref==3.8.1 ...@@ -32,6 +39,7 @@ asgiref==3.8.1
# via opentelemetry-instrumentation-asgi # via opentelemetry-instrumentation-asgi
attrs==23.2.0 attrs==23.2.0
# via aiohttp # via aiohttp
# via pytest-docker
authlib==1.3.1 authlib==1.3.1
# via open-webui # via open-webui
av==11.0.0 av==11.0.0
...@@ -76,9 +84,9 @@ chardet==5.2.0 ...@@ -76,9 +84,9 @@ chardet==5.2.0
charset-normalizer==3.3.2 charset-normalizer==3.3.2
# via requests # via requests
# via unstructured-client # via unstructured-client
chroma-hnswlib==0.7.3 chroma-hnswlib==0.7.5
# via chromadb # via chromadb
chromadb==0.5.0 chromadb==0.5.4
# via langchain-chroma # via langchain-chroma
# via open-webui # via open-webui
click==8.1.7 click==8.1.7
...@@ -102,7 +110,6 @@ cryptography==42.0.7 ...@@ -102,7 +110,6 @@ cryptography==42.0.7
ctranslate2==4.2.1 ctranslate2==4.2.1
# via faster-whisper # via faster-whisper
dataclasses-json==0.6.6 dataclasses-json==0.6.6
# via langchain
# via langchain-community # via langchain-community
# via unstructured # via unstructured
# via unstructured-client # via unstructured-client
...@@ -113,11 +120,17 @@ defusedxml==0.7.1 ...@@ -113,11 +120,17 @@ defusedxml==0.7.1
deprecated==1.2.14 deprecated==1.2.14
# via opentelemetry-api # via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc # via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via anthropic
# via openai
dnspython==2.6.1 dnspython==2.6.1
# via email-validator # via email-validator
# via pymongo
docker==7.1.0
# via open-webui
docx2txt==0.8 docx2txt==0.8
# via open-webui # via open-webui
duckduckgo-search==6.1.5 duckduckgo-search==6.2.6
# via open-webui # via open-webui
easygui==0.98.3 easygui==0.98.3
# via oletools # via oletools
...@@ -208,8 +221,11 @@ httplib2==0.22.0 ...@@ -208,8 +221,11 @@ httplib2==0.22.0
httptools==0.6.1 httptools==0.6.1
# via uvicorn # via uvicorn
httpx==0.27.0 httpx==0.27.0
# via anthropic
# via chromadb
# via fastapi # via fastapi
# via langfuse # via langfuse
# via openai
huggingface-hub==0.23.0 huggingface-hub==0.23.0
# via faster-whisper # via faster-whisper
# via sentence-transformers # via sentence-transformers
...@@ -229,12 +245,16 @@ importlib-metadata==7.0.0 ...@@ -229,12 +245,16 @@ importlib-metadata==7.0.0
# via opentelemetry-api # via opentelemetry-api
importlib-resources==6.4.0 importlib-resources==6.4.0
# via chromadb # via chromadb
iniconfig==2.0.0
# via pytest
itsdangerous==2.2.0 itsdangerous==2.2.0
# via flask # via flask
jinja2==3.1.4 jinja2==3.1.4
# via fastapi # via fastapi
# via flask # via flask
# via torch # via torch
jiter==0.5.0
# via anthropic
jmespath==1.0.1 jmespath==1.0.1
# via boto3 # via boto3
# via botocore # via botocore
...@@ -249,14 +269,14 @@ jsonpointer==2.4 ...@@ -249,14 +269,14 @@ jsonpointer==2.4
# via jsonpatch # via jsonpatch
kubernetes==29.0.0 kubernetes==29.0.0
# via chromadb # via chromadb
langchain==0.2.0 langchain==0.2.11
# via langchain-community # via langchain-community
# via open-webui # via open-webui
langchain-chroma==0.1.1 langchain-chroma==0.1.2
# via open-webui # via open-webui
langchain-community==0.2.0 langchain-community==0.2.10
# via open-webui # via open-webui
langchain-core==0.2.1 langchain-core==0.2.28
# via langchain # via langchain
# via langchain-chroma # via langchain-chroma
# via langchain-community # via langchain-community
...@@ -265,22 +285,26 @@ langchain-text-splitters==0.2.0 ...@@ -265,22 +285,26 @@ langchain-text-splitters==0.2.0
# via langchain # via langchain
langdetect==1.0.9 langdetect==1.0.9
# via unstructured # via unstructured
langfuse==2.33.0 langfuse==2.39.2
# via open-webui # via open-webui
langsmith==0.1.57 langsmith==0.1.96
# via langchain # via langchain
# via langchain-community # via langchain-community
# via langchain-core # via langchain-core
lark==1.1.8 lark==1.1.8
# via rtfde # via rtfde
lxml==5.2.2 lxml==5.2.2
# via python-pptx
# via unstructured # via unstructured
mako==1.3.5
# via alembic
markdown==3.6 markdown==3.6
# via open-webui # via open-webui
markdown-it-py==3.0.0 markdown-it-py==3.0.0
# via rich # via rich
markupsafe==2.1.5 markupsafe==2.1.5
# via jinja2 # via jinja2
# via mako
# via werkzeug # via werkzeug
marshmallow==3.21.2 marshmallow==3.21.2
# via dataclasses-json # via dataclasses-json
...@@ -339,11 +363,13 @@ onnxruntime==1.17.3 ...@@ -339,11 +363,13 @@ onnxruntime==1.17.3
# via chromadb # via chromadb
# via faster-whisper # via faster-whisper
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
openai==1.38.0
# via open-webui
opencv-python==4.9.0.80 opencv-python==4.9.0.80
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80 opencv-python-headless==4.10.0.84
# via open-webui # via open-webui
openpyxl==3.1.2 openpyxl==3.1.5
# via open-webui # via open-webui
opentelemetry-api==1.24.0 opentelemetry-api==1.24.0
# via chromadb # via chromadb
...@@ -380,7 +406,6 @@ ordered-set==4.1.0 ...@@ -380,7 +406,6 @@ ordered-set==4.1.0
# via deepdiff # via deepdiff
orjson==3.10.3 orjson==3.10.3
# via chromadb # via chromadb
# via duckduckgo-search
# via fastapi # via fastapi
# via langsmith # via langsmith
overrides==7.7.0 overrides==7.7.0
...@@ -393,6 +418,7 @@ packaging==23.2 ...@@ -393,6 +418,7 @@ packaging==23.2
# via langfuse # via langfuse
# via marshmallow # via marshmallow
# via onnxruntime # via onnxruntime
# via pytest
# via transformers # via transformers
# via unstructured-client # via unstructured-client
pandas==2.2.2 pandas==2.2.2
...@@ -403,19 +429,24 @@ pathspec==0.12.1 ...@@ -403,19 +429,24 @@ pathspec==0.12.1
# via black # via black
pcodedmp==1.2.6 pcodedmp==1.2.6
# via oletools # via oletools
peewee==3.17.5 peewee==3.17.6
# via open-webui # via open-webui
# via peewee-migrate # via peewee-migrate
peewee-migrate==1.12.2 peewee-migrate==1.12.2
# via open-webui # via open-webui
pillow==10.3.0 pillow==10.3.0
# via fpdf2 # via fpdf2
# via python-pptx
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
# via sentence-transformers # via sentence-transformers
platformdirs==4.2.1 platformdirs==4.2.1
# via black # via black
pluggy==1.5.0
# via pytest
posthog==3.5.0 posthog==3.5.0
# via chromadb # via chromadb
primp==0.5.5
# via duckduckgo-search
proto-plus==1.23.0 proto-plus==1.23.0
# via google-ai-generativelanguage # via google-ai-generativelanguage
# via google-api-core # via google-api-core
...@@ -428,6 +459,9 @@ protobuf==4.25.3 ...@@ -428,6 +459,9 @@ protobuf==4.25.3
# via onnxruntime # via onnxruntime
# via opentelemetry-proto # via opentelemetry-proto
# via proto-plus # via proto-plus
psutil==6.0.0
# via open-webui
# via unstructured
psycopg2-binary==2.9.9 psycopg2-binary==2.9.9
# via open-webui # via open-webui
pyasn1==0.6.0 pyasn1==0.6.0
...@@ -440,7 +474,8 @@ pyclipper==1.3.0.post5 ...@@ -440,7 +474,8 @@ pyclipper==1.3.0.post5
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
pycparser==2.22 pycparser==2.22
# via cffi # via cffi
pydantic==2.7.1 pydantic==2.8.2
# via anthropic
# via chromadb # via chromadb
# via fastapi # via fastapi
# via google-generativeai # via google-generativeai
...@@ -449,7 +484,8 @@ pydantic==2.7.1 ...@@ -449,7 +484,8 @@ pydantic==2.7.1
# via langfuse # via langfuse
# via langsmith # via langsmith
# via open-webui # via open-webui
pydantic-core==2.18.2 # via openai
pydantic-core==2.20.1
# via pydantic # via pydantic
pydub==0.25.1 pydub==0.25.1
# via open-webui # via open-webui
...@@ -457,7 +493,9 @@ pygments==2.18.0 ...@@ -457,7 +493,9 @@ pygments==2.18.0
# via rich # via rich
pyjwt==2.8.0 pyjwt==2.8.0
# via open-webui # via open-webui
pymysql==1.1.0 pymongo==4.8.0
# via open-webui
pymysql==1.1.1
# via open-webui # via open-webui
pypandoc==1.13 pypandoc==1.13
# via open-webui # via open-webui
...@@ -471,8 +509,11 @@ pypika==0.48.9 ...@@ -471,8 +509,11 @@ pypika==0.48.9
# via chromadb # via chromadb
pyproject-hooks==1.1.0 pyproject-hooks==1.1.0
# via build # via build
pyreqwest-impersonate==0.4.7 pytest==8.2.2
# via duckduckgo-search # via open-webui
# via pytest-docker
pytest-docker==3.1.1
# via open-webui
python-dateutil==2.9.0.post0 python-dateutil==2.9.0.post0
# via botocore # via botocore
# via kubernetes # via kubernetes
...@@ -492,7 +533,9 @@ python-magic==0.4.27 ...@@ -492,7 +533,9 @@ python-magic==0.4.27
python-multipart==0.0.9 python-multipart==0.0.9
# via fastapi # via fastapi
# via open-webui # via open-webui
python-socketio==5.11.2 python-pptx==0.6.23
# via open-webui
python-socketio==5.11.3
# via open-webui # via open-webui
pytube==15.0.0 pytube==15.0.0
# via open-webui # via open-webui
...@@ -516,15 +559,18 @@ rank-bm25==0.2.2 ...@@ -516,15 +559,18 @@ rank-bm25==0.2.2
# via open-webui # via open-webui
rapidfuzz==3.9.0 rapidfuzz==3.9.0
# via unstructured # via unstructured
rapidocr-onnxruntime==1.3.22 rapidocr-onnxruntime==1.3.24
# via open-webui # via open-webui
red-black-tree-mod==1.20 red-black-tree-mod==1.20
# via extract-msg # via extract-msg
redis==5.0.8
# via open-webui
regex==2024.5.10 regex==2024.5.10
# via nltk # via nltk
# via tiktoken
# via transformers # via transformers
requests==2.32.2 requests==2.32.3
# via chromadb # via docker
# via google-api-core # via google-api-core
# via huggingface-hub # via huggingface-hub
# via kubernetes # via kubernetes
...@@ -534,6 +580,7 @@ requests==2.32.2 ...@@ -534,6 +580,7 @@ requests==2.32.2
# via open-webui # via open-webui
# via posthog # via posthog
# via requests-oauthlib # via requests-oauthlib
# via tiktoken
# via transformers # via transformers
# via unstructured # via unstructured
# via unstructured-client # via unstructured-client
...@@ -556,12 +603,12 @@ scikit-learn==1.4.2 ...@@ -556,12 +603,12 @@ scikit-learn==1.4.2
scipy==1.13.0 scipy==1.13.0
# via scikit-learn # via scikit-learn
# via sentence-transformers # via sentence-transformers
sentence-transformers==2.7.0 sentence-transformers==3.0.1
# via open-webui # via open-webui
setuptools==69.5.1 setuptools==69.5.1
# via ctranslate2 # via ctranslate2
# via opentelemetry-instrumentation # via opentelemetry-instrumentation
shapely==2.0.4 shapely==2.0.5
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
shellingham==1.5.4 shellingham==1.5.4
# via typer # via typer
...@@ -577,13 +624,17 @@ six==1.16.0 ...@@ -577,13 +624,17 @@ six==1.16.0
# via rapidocr-onnxruntime # via rapidocr-onnxruntime
# via unstructured-client # via unstructured-client
sniffio==1.3.1 sniffio==1.3.1
# via anthropic
# via anyio # via anyio
# via httpx # via httpx
# via openai
soupsieve==2.5 soupsieve==2.5
# via beautifulsoup4 # via beautifulsoup4
sqlalchemy==2.0.30 sqlalchemy==2.0.31
# via alembic
# via langchain # via langchain
# via langchain-community # via langchain-community
# via open-webui
starlette==0.37.2 starlette==0.37.2
# via fastapi # via fastapi
sympy==1.12 sympy==1.12
...@@ -598,7 +649,10 @@ tenacity==8.3.0 ...@@ -598,7 +649,10 @@ tenacity==8.3.0
# via langchain-core # via langchain-core
threadpoolctl==3.5.0 threadpoolctl==3.5.0
# via scikit-learn # via scikit-learn
tiktoken==0.7.0
# via open-webui
tokenizers==0.15.2 tokenizers==0.15.2
# via anthropic
# via chromadb # via chromadb
# via faster-whisper # via faster-whisper
# via transformers # via transformers
...@@ -609,18 +663,24 @@ tqdm==4.66.4 ...@@ -609,18 +663,24 @@ tqdm==4.66.4
# via google-generativeai # via google-generativeai
# via huggingface-hub # via huggingface-hub
# via nltk # via nltk
# via openai
# via sentence-transformers # via sentence-transformers
# via transformers # via transformers
# via unstructured
transformers==4.39.3 transformers==4.39.3
# via sentence-transformers # via sentence-transformers
typer==0.12.3 typer==0.12.3
# via chromadb # via chromadb
# via fastapi-cli # via fastapi-cli
typing-extensions==4.11.0 typing-extensions==4.11.0
# via alembic
# via anthropic
# via chromadb # via chromadb
# via fastapi # via fastapi
# via google-generativeai # via google-generativeai
# via huggingface-hub # via huggingface-hub
# via langchain-core
# via openai
# via opentelemetry-sdk # via opentelemetry-sdk
# via pydantic # via pydantic
# via pydantic-core # via pydantic-core
...@@ -640,7 +700,7 @@ tzlocal==5.2 ...@@ -640,7 +700,7 @@ tzlocal==5.2
# via extract-msg # via extract-msg
ujson==5.10.0 ujson==5.10.0
# via fastapi # via fastapi
unstructured==0.14.0 unstructured==0.15.0
# via open-webui # via open-webui
unstructured-client==0.22.0 unstructured-client==0.22.0
# via unstructured # via unstructured
...@@ -648,6 +708,7 @@ uritemplate==4.1.1 ...@@ -648,6 +708,7 @@ uritemplate==4.1.1
# via google-api-python-client # via google-api-python-client
urllib3==2.2.1 urllib3==2.2.1
# via botocore # via botocore
# via docker
# via kubernetes # via kubernetes
# via requests # via requests
# via unstructured-client # via unstructured-client
...@@ -676,6 +737,8 @@ wsproto==1.2.0 ...@@ -676,6 +737,8 @@ wsproto==1.2.0
# via simple-websocket # via simple-websocket
xlrd==2.0.1 xlrd==2.0.1
# via open-webui # via open-webui
xlsxwriter==3.2.0
# via python-pptx
yarl==1.9.4 yarl==1.9.4
# via aiohttp # via aiohttp
youtube-transcript-api==0.6.2 youtube-transcript-api==0.6.2
......
...@@ -154,3 +154,7 @@ input[type='number'] { ...@@ -154,3 +154,7 @@ input[type='number'] {
.tippy-box[data-theme~='dark'] { .tippy-box[data-theme~='dark'] {
@apply rounded-lg bg-gray-950 text-xs border border-gray-900 shadow-xl; @apply rounded-lg bg-gray-950 text-xs border border-gray-900 shadow-xl;
} }
.password {
-webkit-text-security: disc;
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment