"src/lib/vscode:/vscode.git/clone" did not exist on "dcac8a773bb1a6271cde85d7d7b1183f3894e650"
Commit aba63088 authored by Jun Siang Cheah

Merge remote-tracking branch 'upstream/dev' into feat/include-git-hash-everywhere

parents 4fdb26fd 7b81271b
@@ -19,27 +19,20 @@ from starlette.exceptions import HTTPException as StarletteHTTPException
 from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.responses import StreamingResponse, Response

-from apps.ollama.main import app as ollama_app
-from apps.openai.main import app as openai_app
-from apps.litellm.main import (
-    app as litellm_app,
-    start_litellm_background,
-    shutdown_litellm_background,
-)
+from apps.ollama.main import app as ollama_app, get_all_models as get_ollama_models
+from apps.openai.main import app as openai_app, get_all_models as get_openai_models

 from apps.audio.main import app as audio_app
 from apps.images.main import app as images_app
 from apps.rag.main import app as rag_app
-from apps.web.main import app as webui_app
+from apps.webui.main import app as webui_app

 import asyncio
 from pydantic import BaseModel
-from typing import List
+from typing import List, Optional
+
+from apps.webui.models.models import Models, ModelModel

-from utils.utils import get_admin_user
+from utils.utils import get_admin_user, get_verified_user
 from apps.rag.utils import rag_messages

 from config import (
@@ -53,7 +46,8 @@ from config import (
     FRONTEND_BUILD_DIR,
     CACHE_DIR,
     STATIC_DIR,
-    ENABLE_LITELLM,
+    ENABLE_OPENAI_API,
+    ENABLE_OLLAMA_API,
     ENABLE_MODEL_FILTER,
     MODEL_FILTER_LIST,
     GLOBAL_LOG_LEVEL,
@@ -100,11 +94,7 @@ https://github.com/open-webui/open-webui
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    if ENABLE_LITELLM:
-        asyncio.create_task(start_litellm_background())
     yield
-    if ENABLE_LITELLM:
-        await shutdown_litellm_background()

 app = FastAPI(
@@ -112,11 +102,19 @@ app = FastAPI(
 )

 app.state.config = AppConfig()
+
+app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
+app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
+
 app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
 app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
 app.state.config.WEBHOOK_URL = WEBHOOK_URL
+
+app.state.MODELS = {}

 origins = ["*"]
@@ -233,6 +231,11 @@ app.add_middleware(
 @app.middleware("http")
 async def check_url(request: Request, call_next):
+    if len(app.state.MODELS) == 0:
+        await get_all_models()
+    else:
+        pass
+
     start_time = int(time.time())
     response = await call_next(request)
     process_time = int(time.time()) - start_time
@@ -249,9 +252,8 @@ async def update_embedding_function(request: Request, call_next):
     return response

-app.mount("/litellm/api", litellm_app)
 app.mount("/ollama", ollama_app)
-app.mount("/openai/api", openai_app)
+app.mount("/openai", openai_app)

 app.mount("/images/api/v1", images_app)
 app.mount("/audio/api/v1", audio_app)

@@ -262,6 +264,87 @@ app.mount("/api/v1", webui_app)
 webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
+async def get_all_models():
+    openai_models = []
+    ollama_models = []
+
+    if app.state.config.ENABLE_OPENAI_API:
+        openai_models = await get_openai_models()
+        openai_models = openai_models["data"]
+
+    if app.state.config.ENABLE_OLLAMA_API:
+        ollama_models = await get_ollama_models()
+        ollama_models = [
+            {
+                "id": model["model"],
+                "name": model["name"],
+                "object": "model",
+                "created": int(time.time()),
+                "owned_by": "ollama",
+                "ollama": model,
+            }
+            for model in ollama_models["models"]
+        ]
+
+    models = openai_models + ollama_models
+
+    custom_models = Models.get_all_models()
+    for custom_model in custom_models:
+        if custom_model.base_model_id == None:
+            for model in models:
+                if (
+                    custom_model.id == model["id"]
+                    or custom_model.id == model["id"].split(":")[0]
+                ):
+                    model["name"] = custom_model.name
+                    model["info"] = custom_model.model_dump()
+        else:
+            owned_by = "openai"
+            for model in models:
+                if (
+                    custom_model.base_model_id == model["id"]
+                    or custom_model.base_model_id == model["id"].split(":")[0]
+                ):
+                    owned_by = model["owned_by"]
+                    break
+
+            models.append(
+                {
+                    "id": custom_model.id,
+                    "name": custom_model.name,
+                    "object": "model",
+                    "created": custom_model.created_at,
+                    "owned_by": owned_by,
+                    "info": custom_model.model_dump(),
+                    "preset": True,
+                }
+            )
+
+    app.state.MODELS = {model["id"]: model for model in models}
+    webui_app.state.MODELS = app.state.MODELS
+
+    return models
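In the merge above, a stored custom model without a base_model_id overlays a matching upstream model (renaming and annotating it), while one with a base_model_id is appended as a preset that inherits owned_by from its base. A minimal standalone sketch of that behavior, with hypothetical in-memory data in place of the Models table:

    # Standalone sketch of the overlay/preset merge; the data is hypothetical.
    import time

    models = [
        {"id": "llama3:latest", "name": "llama3", "owned_by": "ollama"},
        {"id": "gpt-4o", "name": "gpt-4o", "owned_by": "openai"},
    ]
    custom_models = [
        # No base_model_id: overlay, renames the matching base model.
        {"id": "llama3", "name": "My Llama", "base_model_id": None},
        # With base_model_id: preset, appended as a new model entry.
        {"id": "support-bot", "name": "Support Bot", "base_model_id": "gpt-4o"},
    ]

    for custom in custom_models:
        if custom["base_model_id"] is None:
            for model in models:
                # "llama3" also matches "llama3:latest" via the tag split.
                if custom["id"] in (model["id"], model["id"].split(":")[0]):
                    model["name"] = custom["name"]
        else:
            owned_by = next(
                (m["owned_by"] for m in models
                 if custom["base_model_id"] in (m["id"], m["id"].split(":")[0])),
                "openai",
            )
            models.append(
                {
                    "id": custom["id"],
                    "name": custom["name"],
                    "owned_by": owned_by,
                    "created": int(time.time()),
                    "preset": True,
                }
            )

    print({m["id"]: m["name"] for m in models})
    # {'llama3:latest': 'My Llama', 'gpt-4o': 'gpt-4o', 'support-bot': 'Support Bot'}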
@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
models = await get_all_models()
if app.state.config.ENABLE_MODEL_FILTER:
if user.role == "user":
models = list(
filter(
lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
models,
)
)
return {"data": models}
return {"data": models}
@app.get("/api/config") @app.get("/api/config")
async def get_app_config(): async def get_app_config():
# Checking and Handling the Absence of 'ui' in CONFIG_DATA # Checking and Handling the Absence of 'ui' in CONFIG_DATA
...@@ -276,12 +359,13 @@ async def get_app_config(): ...@@ -276,12 +359,13 @@ async def get_app_config():
"name": WEBUI_NAME, "name": WEBUI_NAME,
"version": VERSION, "version": VERSION,
"auth": WEBUI_AUTH, "auth": WEBUI_AUTH,
"auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
"enable_signup": webui_app.state.config.ENABLE_SIGNUP,
"enable_image_generation": images_app.state.config.ENABLED,
"enable_admin_export": ENABLE_ADMIN_EXPORT,
"default_locale": default_locale, "default_locale": default_locale,
"images": images_app.state.config.ENABLED,
"default_models": webui_app.state.config.DEFAULT_MODELS, "default_models": webui_app.state.config.DEFAULT_MODELS,
"default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS, "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
"trusted_header_auth": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
"admin_export_enabled": ENABLE_ADMIN_EXPORT,
} }
@@ -305,15 +389,6 @@ async def update_model_filter_config(
     app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
     app.state.config.MODEL_FILTER_LIST = form_data.models

-    ollama_app.state.config.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
-    ollama_app.state.config.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
-
-    openai_app.state.config.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
-    openai_app.state.config.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
-
-    litellm_app.state.ENABLE_MODEL_FILTER = app.state.config.ENABLE_MODEL_FILTER
-    litellm_app.state.MODEL_FILTER_LIST = app.state.config.MODEL_FILTER_LIST
-
     return {
         "enabled": app.state.config.ENABLE_MODEL_FILTER,
         "models": app.state.config.MODEL_FILTER_LIST,

@@ -334,7 +409,6 @@ class UrlForm(BaseModel):
 @app.post("/api/webhook")
 async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
     app.state.config.WEBHOOK_URL = form_data.url
     webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
     return {
...
@@ -18,8 +18,6 @@ psycopg2-binary==2.9.9
 PyMySQL==1.1.1
 bcrypt==4.1.3

-litellm[proxy]==1.37.20
-
 boto3==1.34.110
 argon2-cffi==23.1.0
...
-litellm_settings:
-  drop_params: true
-model_list:
-  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
-    litellm_params:
-      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 1024
-  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
-    litellm_params:
-      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 1024
-  - model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
-    litellm_params:
-      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 2047
-  - model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
-    litellm_params:
-      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 8192
-  - model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
-    litellm_params:
-      model: huggingface/microsoft/Phi-3-mini-4k-instruct
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 1024
-  - model_name: 'HuggingFace: Google: Gemma 7B 1.1'
-    litellm_params:
-      model: huggingface/google/gemma-1.1-7b-it
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 1024
-  - model_name: 'HuggingFace: Yi-1.5 34B Chat'
-    litellm_params:
-      model: huggingface/01-ai/Yi-1.5-34B-Chat
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 1024
-  - model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
-    litellm_params:
-      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
-      api_key: os.environ/HF_TOKEN
-      max_tokens: 2048
@@ -34,11 +34,6 @@ fi
 # Check if SPACE_ID is set, if so, configure for space
 if [ -n "$SPACE_ID" ]; then
   echo "Configuring for HuggingFace Space deployment"

-  # Copy litellm_config.yaml with specified ownership
-  echo "Copying litellm_config.yaml to the desired location with specified ownership..."
-  cp -f ./space/litellm_config.yaml ./data/litellm/config.yaml
-
   if [ -n "$ADMIN_USER_EMAIL" ] && [ -n "$ADMIN_USER_PASSWORD" ]; then
     echo "Admin user configured, creating"
     WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
...
 from pathlib import Path
 import hashlib
+import json
 import re
 from datetime import timedelta
 from typing import Optional

@@ -110,3 +111,76 @@ def parse_duration(duration: str) -> Optional[timedelta]:
             total_duration += timedelta(weeks=number)

     return total_duration
+
+def parse_ollama_modelfile(model_text):
+    parameters_meta = {
+        "mirostat": int,
+        "mirostat_eta": float,
+        "mirostat_tau": float,
+        "num_ctx": int,
+        "repeat_last_n": int,
+        "repeat_penalty": float,
+        "temperature": float,
+        "seed": int,
+        "stop": str,
+        "tfs_z": float,
+        "num_predict": int,
+        "top_k": int,
+        "top_p": float,
+    }
+
+    data = {"base_model_id": None, "params": {}}
+
+    # Parse base model
+    base_model_match = re.search(
+        r"^FROM\s+(\w+)", model_text, re.MULTILINE | re.IGNORECASE
+    )
+    if base_model_match:
+        data["base_model_id"] = base_model_match.group(1)
+
+    # Parse template
+    template_match = re.search(
+        r'TEMPLATE\s+"""(.+?)"""', model_text, re.DOTALL | re.IGNORECASE
+    )
+    if template_match:
+        data["params"] = {"template": template_match.group(1).strip()}
+
+    # Parse stops
+    stops = re.findall(r'PARAMETER stop "(.*?)"', model_text, re.IGNORECASE)
+    if stops:
+        data["params"]["stop"] = stops
+
+    # Parse other parameters from the provided list
+    for param, param_type in parameters_meta.items():
+        param_match = re.search(rf"PARAMETER {param} (.+)", model_text, re.IGNORECASE)
+        if param_match:
+            value = param_match.group(1)
+            if param_type == int:
+                value = int(value)
+            elif param_type == float:
+                value = float(value)
+            data["params"][param] = value
+
+    # Parse adapter
+    adapter_match = re.search(r"ADAPTER (.+)", model_text, re.IGNORECASE)
+    if adapter_match:
+        data["params"]["adapter"] = adapter_match.group(1)
+
+    # Parse system description
+    system_desc_match = re.search(
+        r'SYSTEM\s+"""(.+?)"""', model_text, re.DOTALL | re.IGNORECASE
+    )
+    if system_desc_match:
+        data["params"]["system"] = system_desc_match.group(1).strip()
+
+    # Parse messages
+    messages = []
+    message_matches = re.findall(r"MESSAGE (\w+) (.+)", model_text, re.IGNORECASE)
+    for role, content in message_matches:
+        messages.append({"role": role, "content": content})
+
+    if messages:
+        data["params"]["messages"] = messages
+
+    return data
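A quick exercise of the parser, assuming the function above is importable; the Modelfile text here is an illustrative assumption:

    # Hypothetical Modelfile text to exercise parse_ollama_modelfile.
    modelfile = '''
    FROM llama3
    PARAMETER temperature 0.7
    PARAMETER num_ctx 4096
    SYSTEM """You are a concise assistant."""
    '''

    data = parse_ollama_modelfile(modelfile)
    print(data["base_model_id"])          # llama3
    print(data["params"]["temperature"])  # 0.7 (coerced to float)
    print(data["params"]["num_ctx"])      # 4096 (coerced to int)
    print(data["params"]["system"])       # You are a concise assistant.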
+from apps.webui.models.models import Models, ModelModel, ModelForm, ModelResponse
+
+
+def get_model_id_from_custom_model_id(id: str):
+    model = Models.get_model_by_id(id)
+
+    if model:
+        return model.id
+    else:
+        return id
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
 from fastapi import HTTPException, status, Depends

-from apps.web.models.users import Users
+from apps.webui.models.users import Users

 from pydantic import BaseModel
 from typing import Union, Optional
...
@@ -273,7 +273,6 @@ langsmith==0.1.57
     # via langchain-community
     # via langchain-core
 litellm==1.37.20
-    # via litellm
     # via open-webui
 lxml==5.2.2
     # via unstructured
@@ -396,7 +395,6 @@ pandas==2.2.2
     # via open-webui
 passlib==1.7.4
     # via open-webui
-    # via passlib
 pathspec==0.12.1
     # via black
 peewee==3.17.5
@@ -454,7 +452,6 @@ pygments==2.18.0
 pyjwt==2.8.0
     # via litellm
     # via open-webui
-    # via pyjwt
 pymysql==1.1.0
     # via open-webui
 pypandoc==1.13
@@ -559,6 +556,9 @@ scipy==1.13.0
     # via sentence-transformers
 sentence-transformers==2.7.0
     # via open-webui
+setuptools==69.5.1
+    # via ctranslate2
+    # via opentelemetry-instrumentation
 shapely==2.0.4
     # via rapidocr-onnxruntime
 shellingham==1.5.4
@@ -659,7 +659,6 @@ uvicorn==0.22.0
     # via fastapi
     # via litellm
     # via open-webui
-    # via uvicorn
 uvloop==0.19.0
     # via uvicorn
 validators==0.28.1
@@ -687,6 +686,3 @@ youtube-transcript-api==0.6.2
     # via open-webui
 zipp==3.18.1
     # via importlib-metadata
-setuptools==69.5.1
-    # via ctranslate2
-    # via opentelemetry-instrumentation
@@ -273,7 +273,6 @@ langsmith==0.1.57
     # via langchain-community
     # via langchain-core
 litellm==1.37.20
-    # via litellm
     # via open-webui
 lxml==5.2.2
     # via unstructured
@@ -396,7 +395,6 @@ pandas==2.2.2
     # via open-webui
 passlib==1.7.4
     # via open-webui
-    # via passlib
 pathspec==0.12.1
     # via black
 peewee==3.17.5
@@ -454,7 +452,6 @@ pygments==2.18.0
 pyjwt==2.8.0
     # via litellm
     # via open-webui
-    # via pyjwt
 pymysql==1.1.0
     # via open-webui
 pypandoc==1.13
@@ -559,6 +556,9 @@ scipy==1.13.0
     # via sentence-transformers
 sentence-transformers==2.7.0
     # via open-webui
+setuptools==69.5.1
+    # via ctranslate2
+    # via opentelemetry-instrumentation
 shapely==2.0.4
     # via rapidocr-onnxruntime
 shellingham==1.5.4
@@ -659,7 +659,6 @@ uvicorn==0.22.0
     # via fastapi
     # via litellm
     # via open-webui
-    # via uvicorn
 uvloop==0.19.0
     # via uvicorn
 validators==0.28.1
@@ -687,6 +686,3 @@ youtube-transcript-api==0.6.2
     # via open-webui
 zipp==3.18.1
     # via importlib-metadata
-setuptools==69.5.1
-    # via ctranslate2
-    # via opentelemetry-instrumentation
@@ -654,3 +654,35 @@ export const deleteAllChats = async (token: string) => {
 	return res;
 };
+
+export const archiveAllChats = async (token: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/chats/archive/all`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
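The helper posts to the new archive-all route under the webui app (mounted at /api/v1 above). The same call can be made outside the frontend; this sketch assumes a local instance and a placeholder token:

    # Hypothetical direct call to the endpoint archiveAllChats wraps.
    import requests

    resp = requests.post(
        "http://localhost:8080/api/v1/chats/archive/all",  # assumed base URL
        headers={"Authorization": "Bearer <token>"},
        timeout=10,
    )
    resp.raise_for_status()
    print(resp.json())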
 import { WEBUI_BASE_URL } from '$lib/constants';

+export const getModels = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_BASE_URL}/api/models`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = err;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	let models = res?.data ?? [];
+
+	models = models
+		.filter((models) => models)
+		.sort((a, b) => {
+			// Compare case-insensitively
+			const lowerA = a.name.toLowerCase();
+			const lowerB = b.name.toLowerCase();
+			if (lowerA < lowerB) return -1;
+			if (lowerA > lowerB) return 1;
+
+			// If equal case-insensitively, sort by the original strings;
+			// lowercase comes before uppercase due to ASCII values
+			if (a < b) return -1;
+			if (a > b) return 1;
+			return 0; // They are equal
+		});
+
+	console.log(models);
+
+	return models;
+};
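The sort compares names case-insensitively first, with a case-sensitive pass intended as a tiebreak for names differing only in case. The same two-level ordering can be expressed compactly as a sort key; a small sketch with made-up names:

    # Sketch of the two-level ordering: case-insensitive first,
    # then the original (case-sensitive) string as a tiebreak.
    names = ["Mistral", "llama3", "LLaMA3", "gpt-4o"]
    ordered = sorted(names, key=lambda n: (n.lower(), n))
    print(ordered)  # ['gpt-4o', 'LLaMA3', 'llama3', 'Mistral']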
 export const getBackendConfig = async () => {
 	let error = null;

@@ -196,3 +245,77 @@ export const updateWebhookUrl = async (token: string, url: string) => {
 	return res.url;
 };
+
+export const getModelConfig = async (token: string): Promise<GlobalModelConfig> => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_BASE_URL}/api/config/models`, {
+		method: 'GET',
+		headers: {
+			'Content-Type': 'application/json',
+			Authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = err;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res.models;
+};
+
+export interface ModelConfig {
+	id: string;
+	name: string;
+	meta: ModelMeta;
+	base_model_id?: string;
+	params: ModelParams;
+}
+
+export interface ModelMeta {
+	description?: string;
+	capabilities?: object;
+}
+
+export interface ModelParams {}
+
+export type GlobalModelConfig = ModelConfig[];
+
+export const updateModelConfig = async (token: string, config: GlobalModelConfig) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_BASE_URL}/api/config/models`, {
+		method: 'POST',
+		headers: {
+			'Content-Type': 'application/json',
+			Authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			models: config
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = err;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
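updateModelConfig posts the whole model configuration under a models key, mirroring the ModelConfig interface above. A sketch of the equivalent request; the base URL, token, and example model are assumptions:

    # Hypothetical payload mirroring the ModelConfig interface.
    import requests

    payload = {
        "models": [
            {
                "id": "support-bot",
                "name": "Support Bot",
                "base_model_id": "gpt-4o",
                "meta": {"description": "Preset for support chats"},
                "params": {},
            }
        ]
    }
    resp = requests.post(
        "http://localhost:8080/api/config/models",  # assumed base URL
        headers={"Authorization": "Bearer <admin token>"},  # admin-only route
        json=payload,
        timeout=10,
    )
    resp.raise_for_status()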
-import { LITELLM_API_BASE_URL } from '$lib/constants';
-
-export const getLiteLLMModels = async (token: string = '') => {
-	let error = null;
-
-	const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
-		method: 'GET',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			...(token && { authorization: `Bearer ${token}` })
-		}
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
-			return [];
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	const models = Array.isArray(res) ? res : res?.data ?? null;
-
-	return models
-		? models
-				.map((model) => ({
-					id: model.id,
-					name: model.name ?? model.id,
-					external: true,
-					source: 'LiteLLM'
-				}))
-				.sort((a, b) => {
-					return a.name.localeCompare(b.name);
-				})
-		: models;
-};
-
-export const getLiteLLMModelInfo = async (token: string = '') => {
-	let error = null;
-
-	const res = await fetch(`${LITELLM_API_BASE_URL}/model/info`, {
-		method: 'GET',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			...(token && { authorization: `Bearer ${token}` })
-		}
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
-			return [];
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	const models = Array.isArray(res) ? res : res?.data ?? null;
-
-	return models;
-};
-
-type AddLiteLLMModelForm = {
-	name: string;
-	model: string;
-	api_base: string;
-	api_key: string;
-	rpm: string;
-	max_tokens: string;
-};
-
-export const addLiteLLMModel = async (token: string = '', payload: AddLiteLLMModelForm) => {
-	let error = null;
-
-	const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			...(token && { authorization: `Bearer ${token}` })
-		},
-		body: JSON.stringify({
-			model_name: payload.name,
-			litellm_params: {
-				model: payload.model,
-				...(payload.api_base === '' ? {} : { api_base: payload.api_base }),
-				...(payload.api_key === '' ? {} : { api_key: payload.api_key }),
-				...(isNaN(parseInt(payload.rpm)) ? {} : { rpm: parseInt(payload.rpm) }),
-				...(payload.max_tokens === '' ? {} : { max_tokens: payload.max_tokens })
-			}
-		})
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
-			return [];
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res;
-};
-
-export const deleteLiteLLMModel = async (token: string = '', id: string) => {
-	let error = null;
-
-	const res = await fetch(`${LITELLM_API_BASE_URL}/model/delete`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			...(token && { authorization: `Bearer ${token}` })
-		},
-		body: JSON.stringify({
-			id: id
-		})
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
-			return [];
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res;
-};
 import { WEBUI_API_BASE_URL } from '$lib/constants';

-export const createNewModelfile = async (token: string, modelfile: object) => {
+export const addNewModel = async (token: string, model: object) => {
 	let error = null;

-	const res = await fetch(`${WEBUI_API_BASE_URL}/modelfiles/create`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/add`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
 			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
 		},
-		body: JSON.stringify({
-			modelfile: modelfile
-		})
+		body: JSON.stringify(model)
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
@@ -31,10 +29,10 @@ export const createNewModelfile = async (token: string, modelfile: object) => {
 	return res;
 };

-export const getModelfiles = async (token: string = '') => {
+export const getModelInfos = async (token: string = '') => {
 	let error = null;

-	const res = await fetch(`${WEBUI_API_BASE_URL}/modelfiles/`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models`, {
 		method: 'GET',
 		headers: {
 			Accept: 'application/json',

@@ -59,22 +57,22 @@ export const getModelfiles = async (token: string = '') => {
 		throw error;
 	}

-	return res.map((modelfile) => modelfile.modelfile);
+	return res;
 };
-export const getModelfileByTagName = async (token: string, tagName: string) => {
+export const getModelById = async (token: string, id: string) => {
 	let error = null;

-	const res = await fetch(`${WEBUI_API_BASE_URL}/modelfiles/`, {
-		method: 'POST',
+	const searchParams = new URLSearchParams();
+	searchParams.append('id', id);
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models?${searchParams.toString()}`, {
+		method: 'GET',
 		headers: {
 			Accept: 'application/json',
 			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			tag_name: tagName
-		})
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();

@@ -94,27 +92,23 @@ export const getModelfileByTagName = async (token: string, tagName: string) => {
 		throw error;
 	}

-	return res.modelfile;
+	return res;
 };
-export const updateModelfileByTagName = async (
-	token: string,
-	tagName: string,
-	modelfile: object
-) => {
+export const updateModelById = async (token: string, id: string, model: object) => {
 	let error = null;

-	const res = await fetch(`${WEBUI_API_BASE_URL}/modelfiles/update`, {
+	const searchParams = new URLSearchParams();
+	searchParams.append('id', id);
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/update?${searchParams.toString()}`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
 			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
 		},
-		body: JSON.stringify({
-			tag_name: tagName,
-			modelfile: modelfile
-		})
+		body: JSON.stringify(model)
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();

@@ -137,19 +131,19 @@ export const updateModelfileByTagName = async (
 	return res;
 };
-export const deleteModelfileByTagName = async (token: string, tagName: string) => {
+export const deleteModelById = async (token: string, id: string) => {
 	let error = null;

-	const res = await fetch(`${WEBUI_API_BASE_URL}/modelfiles/delete`, {
+	const searchParams = new URLSearchParams();
+	searchParams.append('id', id);
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/delete?${searchParams.toString()}`, {
 		method: 'DELETE',
 		headers: {
 			Accept: 'application/json',
 			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			tag_name: tagName
-		})
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
...
@@ -164,7 +164,7 @@ export const getOllamaVersion = async (token: string = '') => {
 		throw error;
 	}

-	return res?.version ?? '';
+	return res?.version ?? false;
 };

 export const getOllamaModels = async (token: string = '') => {
...
@@ -230,7 +230,12 @@ export const getOpenAIModels = async (token: string = '') => {
 	return models
 		? models
-				.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
+				.map((model) => ({
+					id: model.id,
+					name: model.name ?? model.id,
+					external: true,
+					custom_info: model.custom_info
+				}))
 				.sort((a, b) => {
 					return a.name.localeCompare(b.name);
 				})
...
<script lang="ts"> <script lang="ts">
import fileSaver from 'file-saver';
const { saveAs } = fileSaver;
import { downloadDatabase } from '$lib/apis/utils'; import { downloadDatabase } from '$lib/apis/utils';
import { onMount, getContext } from 'svelte'; import { onMount, getContext } from 'svelte';
import { config } from '$lib/stores'; import { config, user } from '$lib/stores';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { getAllUserChats } from '$lib/apis/chats';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
export let saveHandler: Function; export let saveHandler: Function;
const exportAllUserChats = async () => {
let blob = new Blob([JSON.stringify(await getAllUserChats(localStorage.token))], {
type: 'application/json'
});
saveAs(blob, `all-chats-export-${Date.now()}.json`);
};
onMount(async () => { onMount(async () => {
// permissions = await getUserPermissions(localStorage.token); // permissions = await getUserPermissions(localStorage.token);
}); });
@@ -23,10 +34,10 @@
 	<div>
 		<div class=" mb-2 text-sm font-medium">{$i18n.t('Database')}</div>

-		<div class=" flex w-full justify-between">
-			<!-- <div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Deletion')}</div> -->
-			{#if $config?.admin_export_enabled ?? true}
+		{#if $config?.enable_admin_export ?? true}
+			<div class=" flex w-full justify-between">
+				<!-- <div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Deletion')}</div> -->
 				<button
 					class=" flex rounded-md py-1.5 px-3 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
 					type="button"

@@ -55,8 +66,36 @@
 					</div>
 					<div class=" self-center text-sm font-medium">{$i18n.t('Download Database')}</div>
 				</button>
-			{/if}
-		</div>
+			</div>
+
+			<hr class=" dark:border-gray-700 my-1" />
+
+			<button
+				class=" flex rounded-md py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
+				on:click={() => {
+					exportAllUserChats();
+				}}
+			>
+				<div class=" self-center mr-3">
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 16 16"
+						fill="currentColor"
+						class="w-4 h-4"
+					>
+						<path d="M2 3a1 1 0 0 1 1-1h10a1 1 0 0 1 1 1v1a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3Z" />
+						<path
+							fill-rule="evenodd"
+							d="M13 6H3v6a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V6ZM8.75 7.75a.75.75 0 0 0-1.5 0v2.69L6.03 9.22a.75.75 0 0 0-1.06 1.06l2.5 2.5a.75.75 0 0 0 1.06 0l2.5-2.5a.75.75 0 1 0-1.06-1.06l-1.22 1.22V7.75Z"
+							clip-rule="evenodd"
+						/>
+					</svg>
+				</div>
+				<div class=" self-center text-sm font-medium">
+					{$i18n.t('Export All Chats (All Users)')}
+				</div>
+			</button>
+		{/if}
 	</div>
 </div>
...
@@ -10,7 +10,7 @@
 		chatId,
 		chats,
 		config,
-		modelfiles,
+		type Model,
 		models,
 		settings,
 		showSidebar,

@@ -35,12 +35,7 @@
 	import MessageInput from '$lib/components/chat/MessageInput.svelte';
 	import Messages from '$lib/components/chat/Messages.svelte';
 	import Navbar from '$lib/components/layout/Navbar.svelte';
-	import {
-		LITELLM_API_BASE_URL,
-		OLLAMA_API_BASE_URL,
-		OPENAI_API_BASE_URL,
-		WEBUI_BASE_URL
-	} from '$lib/constants';
+	import { OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
 	import { createOpenAITextStream } from '$lib/apis/streaming';
 	import { queryMemory } from '$lib/apis/memories';
 	import type { Writable } from 'svelte/store';

@@ -60,25 +55,7 @@
 	let showModelSelector = true;
 	let selectedModels = [''];
-	let atSelectedModel = '';
-
-	let selectedModelfile = null;
-	$: selectedModelfile =
-		selectedModels.length === 1 &&
-		$modelfiles.filter((modelfile) => modelfile.tagName === selectedModels[0]).length > 0
-			? $modelfiles.filter((modelfile) => modelfile.tagName === selectedModels[0])[0]
-			: null;
-
-	let selectedModelfiles = {};
-	$: selectedModelfiles = selectedModels.reduce((a, tagName, i, arr) => {
-		const modelfile =
-			$modelfiles.filter((modelfile) => modelfile.tagName === tagName)?.at(0) ?? undefined;
-
-		return {
-			...a,
-			...(modelfile && { [tagName]: modelfile })
-		};
-	}, {});
+	let atSelectedModel: Model | undefined;

 	let chat = null;
 	let tags = [];

@@ -164,6 +141,7 @@
 		if ($page.url.searchParams.get('q')) {
 			prompt = $page.url.searchParams.get('q') ?? '';
+
 			if (prompt) {
 				await tick();
 				submitPrompt(prompt);

@@ -211,7 +189,7 @@
 			await settings.set({
 				..._settings,
 				system: chatContent.system ?? _settings.system,
-				options: chatContent.options ?? _settings.options
+				params: chatContent.options ?? _settings.params
 			});
 			autoScroll = true;
 			await tick();

@@ -300,7 +278,7 @@
 			models: selectedModels,
 			system: $settings.system ?? undefined,
 			options: {
-				...($settings.options ?? {})
+				...($settings.params ?? {})
 			},
 			messages: messages,
 			history: history,

@@ -317,6 +295,7 @@
 		// Reset chat input textarea
 		prompt = '';
+		document.getElementById('chat-textarea').style.height = '';
 		files = [];

 		// Send prompt

@@ -328,75 +307,92 @@
 		const _chatId = JSON.parse(JSON.stringify($chatId));

 		await Promise.all(
-			(modelId ? [modelId] : atSelectedModel !== '' ? [atSelectedModel.id] : selectedModels).map(
-				async (modelId) => {
+			(modelId
+				? [modelId]
+				: atSelectedModel !== undefined
+				  ? [atSelectedModel.id]
+				  : selectedModels
+			).map(async (modelId) => {
 				console.log('modelId', modelId);
 				const model = $models.filter((m) => m.id === modelId).at(0);

 				if (model) {
+					// If there are image files, check if model is vision capable
+					const hasImages = messages.some((message) =>
+						message.files?.some((file) => file.type === 'image')
+					);
+
+					if (hasImages && !(model.info?.meta?.capabilities?.vision ?? true)) {
+						toast.error(
+							$i18n.t('Model {{modelName}} is not vision capable', {
+								modelName: model.name ?? model.id
+							})
+						);
+					}
+
 					// Create response message
 					let responseMessageId = uuidv4();
 					let responseMessage = {
 						parentId: parentId,
 						id: responseMessageId,
 						childrenIds: [],
 						role: 'assistant',
 						content: '',
 						model: model.id,
+						modelName: model.name ?? model.id,
 						userContext: null,
 						timestamp: Math.floor(Date.now() / 1000) // Unix epoch
 					};

 					// Add message to history and Set currentId to messageId
 					history.messages[responseMessageId] = responseMessage;
 					history.currentId = responseMessageId;

 					// Append messageId to childrenIds of parent message
 					if (parentId !== null) {
 						history.messages[parentId].childrenIds = [
 							...history.messages[parentId].childrenIds,
 							responseMessageId
 						];
 					}

 					await tick();

 					let userContext = null;
 					if ($settings?.memory ?? false) {
 						if (userContext === null) {
 							const res = await queryMemory(localStorage.token, prompt).catch((error) => {
 								toast.error(error);
 								return null;
 							});

 							if (res) {
 								if (res.documents[0].length > 0) {
 									userContext = res.documents.reduce((acc, doc, index) => {
 										const createdAtTimestamp = res.metadatas[index][0].created_at;
 										const createdAtDate = new Date(createdAtTimestamp * 1000)
 											.toISOString()
 											.split('T')[0];
 										acc.push(`${index + 1}. [${createdAtDate}]. ${doc[0]}`);
 										return acc;
 									}, []);
 								}
 							}
 							console.log(userContext);
 						}
 					}
 					responseMessage.userContext = userContext;

-					if (model?.external) {
+					if (model?.owned_by === 'openai') {
 						await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
 					} else if (model) {
 						await sendPromptOllama(model, prompt, responseMessageId, _chatId);
 					}
 				} else {
 					toast.error($i18n.t(`Model {{modelId}} not found`, { modelId }));
 				}
-				}
-			)
+			})
 		);

 		await chats.set(await getChatList(localStorage.token));
@@ -430,7 +426,7 @@
 			// Prepare the base message object
 			const baseMessage = {
 				role: message.role,
-				content: arr.length - 2 !== idx ? message.content : message?.raContent ?? message.content
+				content: message.content
 			};

 			// Extract and format image URLs if any exist

@@ -442,7 +438,6 @@
 			if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
 				baseMessage.images = imageUrls;
 			}
-
 			return baseMessage;
 		});
@@ -473,13 +468,15 @@
 				model: model,
 				messages: messagesBody,
 				options: {
-					...($settings.options ?? {}),
+					...($settings.params ?? {}),
 					stop:
-						$settings?.options?.stop ?? undefined
-							? $settings.options.stop.map((str) =>
+						$settings?.params?.stop ?? undefined
+							? $settings.params.stop.map((str) =>
 									decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
 							  )
-							: undefined
+							: undefined,
+					num_predict: $settings?.params?.max_tokens ?? undefined,
+					repeat_penalty: $settings?.params?.frequency_penalty ?? undefined
 				},
 				format: $settings.requestFormat ?? undefined,
 				keep_alive: $settings.keepAlive ?? undefined,
@@ -605,7 +602,8 @@
 		if ($settings.saveChatHistory ?? true) {
 			chat = await updateChatById(localStorage.token, _chatId, {
 				messages: messages,
-				history: history
+				history: history,
+				models: selectedModels
 			});
 			await chats.set(await getChatList(localStorage.token));
 		}
@@ -716,24 +714,21 @@
 						: message?.raContent ?? message.content
 				})
 			})),
-			seed: $settings?.options?.seed ?? undefined,
+			seed: $settings?.params?.seed ?? undefined,
 			stop:
-				$settings?.options?.stop ?? undefined
-					? $settings.options.stop.map((str) =>
+				$settings?.params?.stop ?? undefined
+					? $settings.params.stop.map((str) =>
 							decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
 					  )
 					: undefined,
-			temperature: $settings?.options?.temperature ?? undefined,
-			top_p: $settings?.options?.top_p ?? undefined,
-			num_ctx: $settings?.options?.num_ctx ?? undefined,
-			frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
-			max_tokens: $settings?.options?.num_predict ?? undefined,
+			temperature: $settings?.params?.temperature ?? undefined,
+			top_p: $settings?.params?.top_p ?? undefined,
+			frequency_penalty: $settings?.params?.frequency_penalty ?? undefined,
+			max_tokens: $settings?.params?.max_tokens ?? undefined,
 			docs: docs.length > 0 ? docs : undefined,
 			citations: docs.length > 0
 		},
-		model?.source?.toLowerCase() === 'litellm'
-			? `${LITELLM_API_BASE_URL}/v1`
-			: `${OPENAI_API_BASE_URL}`
+		`${OPENAI_API_BASE_URL}`
 	);

 	// Wait until history/message have been updated
@@ -797,6 +792,7 @@
 	if ($chatId == _chatId) {
 		if ($settings.saveChatHistory ?? true) {
 			chat = await updateChatById(localStorage.token, _chatId, {
+				models: selectedModels,
 				messages: messages,
 				history: history
 			});
@@ -935,10 +931,8 @@
 			) + ' {{prompt}}',
 			titleModelId,
 			userPrompt,
-			titleModel?.external ?? false
-				? titleModel?.source?.toLowerCase() === 'litellm'
-					? `${LITELLM_API_BASE_URL}/v1`
-					: `${OPENAI_API_BASE_URL}`
+			titleModel?.owned_by === 'openai' ?? false
+				? `${OPENAI_API_BASE_URL}`
 				: `${OLLAMA_API_BASE_URL}/v1`
 		);
@@ -1025,16 +1019,12 @@
 			<Messages
 				chatId={$chatId}
 				{selectedModels}
-				{selectedModelfiles}
 				{processing}
 				bind:history
 				bind:messages
 				bind:autoScroll
 				bind:prompt
 				bottomPadding={files.length > 0}
-				suggestionPrompts={chatIdProp
-					? []
-					: selectedModelfile?.suggestionPrompts ?? $config.default_prompt_suggestions}
 				{sendPrompt}
 				{continueGeneration}
 				{regenerateResponse}

@@ -1048,7 +1038,8 @@
 				bind:files
 				bind:prompt
 				bind:autoScroll
-				bind:selectedModel={atSelectedModel}
+				bind:atSelectedModel
+				{selectedModels}
 				{messages}
 				{submitPrompt}
 				{stopResponse}
...
<script lang="ts"> <script lang="ts">
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { onMount, tick, getContext } from 'svelte'; import { onMount, tick, getContext } from 'svelte';
import { mobile, modelfiles, settings, showSidebar } from '$lib/stores'; import { type Model, mobile, settings, showSidebar, models } from '$lib/stores';
import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils'; import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
import { import {
...@@ -27,7 +27,9 @@ ...@@ -27,7 +27,9 @@
export let stopResponse: Function; export let stopResponse: Function;
export let autoScroll = true; export let autoScroll = true;
export let selectedModel = '';
export let atSelectedModel: Model | undefined;
export let selectedModels: [''];
let chatTextAreaElement: HTMLTextAreaElement; let chatTextAreaElement: HTMLTextAreaElement;
let filesInputElement; let filesInputElement;
...@@ -52,6 +54,11 @@ ...@@ -52,6 +54,11 @@
let speechRecognition; let speechRecognition;
let visionCapableModels = [];
$: visionCapableModels = [...(atSelectedModel ? [atSelectedModel] : selectedModels)].filter(
(model) => $models.find((m) => m.id === model)?.info?.meta?.capabilities?.vision ?? true
);
$: if (prompt) { $: if (prompt) {
if (chatTextAreaElement) { if (chatTextAreaElement) {
chatTextAreaElement.style.height = ''; chatTextAreaElement.style.height = '';
@@ -358,6 +365,10 @@
 			inputFiles.forEach((file) => {
 				console.log(file, file.name.split('.').at(-1));
 				if (['image/gif', 'image/webp', 'image/jpeg', 'image/png'].includes(file['type'])) {
+					if (visionCapableModels.length === 0) {
+						toast.error($i18n.t('Selected model(s) do not support image inputs'));
+						return;
+					}
 					let reader = new FileReader();
 					reader.onload = (event) => {
 						files = [
@@ -429,8 +440,8 @@
 <div class="fixed bottom-0 {$showSidebar ? 'left-0 md:left-[260px]' : 'left-0'} right-0">
 	<div class="w-full">
-		<div class="px-2.5 md:px-16 -mb-0.5 mx-auto inset-x-0 bg-transparent flex justify-center">
-			<div class="flex flex-col max-w-5xl w-full">
+		<div class=" -mb-0.5 mx-auto inset-x-0 bg-transparent flex justify-center">
+			<div class="flex flex-col max-w-6xl px-2.5 md:px-6 w-full">
 				<div class="relative">
 					{#if autoScroll === false && messages.length > 0}
 						<div class=" absolute -top-12 left-0 right-0 flex justify-center z-30">
@@ -494,12 +505,12 @@
 						bind:chatInputPlaceholder
 						{messages}
 						on:select={(e) => {
-							selectedModel = e.detail;
+							atSelectedModel = e.detail;
 							chatTextAreaElement?.focus();
 						}}
 					/>

-					{#if selectedModel !== ''}
+					{#if atSelectedModel !== undefined}
 						<div
 							class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900"
 						>
@@ -508,21 +519,21 @@
 								crossorigin="anonymous"
 								alt="model profile"
 								class="size-5 max-w-[28px] object-cover rounded-full"
-								src={$modelfiles.find((modelfile) => modelfile.tagName === selectedModel.id)
-									?.imageUrl ??
+								src={$models.find((model) => model.id === atSelectedModel.id)?.info?.meta
+									?.profile_image_url ??
 									($i18n.language === 'dg-DG'
 										? `/doge.png`
 										: `${WEBUI_BASE_URL}/static/favicon.png`)}
 							/>
 							<div>
-								Talking to <span class=" font-medium">{selectedModel.name} </span>
+								Talking to <span class=" font-medium">{atSelectedModel.name}</span>
 							</div>
 						</div>
 						<div>
 							<button
 								class="flex items-center"
 								on:click={() => {
-									selectedModel = '';
+									atSelectedModel = undefined;
 								}}
 							>
 								<XMark />

@@ -535,7 +546,7 @@
 	</div>

 	<div class="bg-white dark:bg-gray-900">
-		<div class="max-w-6xl px-2.5 md:px-16 mx-auto inset-x-0">
+		<div class="max-w-6xl px-2.5 md:px-6 mx-auto inset-x-0">
 			<div class=" pb-2">
 				<input
 					bind:this={filesInputElement}
@@ -550,6 +561,12 @@
 					if (
 						['image/gif', 'image/webp', 'image/jpeg', 'image/png'].includes(file['type'])
 					) {
+						if (visionCapableModels.length === 0) {
+							toast.error($i18n.t('Selected model(s) do not support image inputs'));
+							inputFiles = null;
+							filesInputElement.value = '';
+							return;
+						}
 						let reader = new FileReader();
 						reader.onload = (event) => {
 							files = [
@@ -589,6 +606,7 @@
 					dir={$settings?.chatDirection ?? 'LTR'}
 					class=" flex flex-col relative w-full rounded-3xl px-1.5 bg-gray-50 dark:bg-gray-850 dark:text-gray-100"
 					on:submit|preventDefault={() => {
+						// check if selectedModels support image input
 						submitPrompt(prompt, user);
 					}}
 				>
@@ -597,7 +615,36 @@
 							{#each files as file, fileIdx}
 								<div class=" relative group">
 									{#if file.type === 'image'}
-										<img src={file.url} alt="input" class=" h-16 w-16 rounded-xl object-cover" />
+										<div class="relative">
+											<img
+												src={file.url}
+												alt="input"
+												class=" h-16 w-16 rounded-xl object-cover"
+											/>
+											{#if atSelectedModel ? visionCapableModels.length === 0 : selectedModels.length !== visionCapableModels.length}
+												<Tooltip
+													className=" absolute top-1 left-1"
+													content={$i18n.t('{{ models }}', {
+														models: [...(atSelectedModel ? [atSelectedModel] : selectedModels)]
+															.filter((id) => !visionCapableModels.includes(id))
+															.join(', ')
+													})}
+												>
+													<svg
+														xmlns="http://www.w3.org/2000/svg"
+														viewBox="0 0 24 24"
+														fill="currentColor"
+														class="size-4 fill-yellow-300"
+													>
+														<path
+															fill-rule="evenodd"
+															d="M9.401 3.003c1.155-2 4.043-2 5.197 0l7.355 12.748c1.154 2-.29 4.5-2.599 4.5H4.645c-2.309 0-3.752-2.5-2.598-4.5L9.4 3.003ZM12 8.25a.75.75 0 0 1 .75.75v3.75a.75.75 0 0 1-1.5 0V9a.75.75 0 0 1 .75-.75Zm0 8.25a.75.75 0 1 0 0-1.5.75.75 0 0 0 0 1.5Z"
+															clip-rule="evenodd"
+														/>
+													</svg>
+												</Tooltip>
+											{/if}
+										</div>
 									{:else if file.type === 'doc'}
 										<div
 											class="h-16 w-[15rem] flex items-center space-x-3 px-2.5 dark:bg-gray-600 rounded-xl border border-gray-200 dark:border-none"

@@ -883,7 +930,7 @@
 							if (e.key === 'Escape') {
 								console.log('Escape');
-								selectedModel = '';
+								atSelectedModel = undefined;
 							}
 						}}
 						rows="1"
...
<script lang="ts"> <script lang="ts">
import { v4 as uuidv4 } from 'uuid'; import { v4 as uuidv4 } from 'uuid';
import { chats, config, modelfiles, settings, user as _user, mobile } from '$lib/stores'; import { chats, config, settings, user as _user, mobile } from '$lib/stores';
import { tick, getContext } from 'svelte'; import { tick, getContext } from 'svelte';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
export let user = $_user; export let user = $_user;
export let prompt; export let prompt;
export let suggestionPrompts = [];
export let processing = ''; export let processing = '';
export let bottomPadding = false; export let bottomPadding = false;
export let autoScroll; export let autoScroll;
...@@ -34,7 +33,6 @@ ...@@ -34,7 +33,6 @@
export let messages = []; export let messages = [];
export let selectedModels; export let selectedModels;
export let selectedModelfiles = [];
$: if (autoScroll && bottomPadding) { $: if (autoScroll && bottomPadding) {
(async () => { (async () => {
...@@ -247,9 +245,7 @@ ...@@ -247,9 +245,7 @@
<div class="h-full flex mb-16"> <div class="h-full flex mb-16">
{#if messages.length == 0} {#if messages.length == 0}
<Placeholder <Placeholder
models={selectedModels} modelIds={selectedModels}
modelfiles={selectedModelfiles}
{suggestionPrompts}
submitPrompt={async (p) => { submitPrompt={async (p) => {
let text = p; let text = p;
...@@ -316,7 +312,6 @@ ...@@ -316,7 +312,6 @@
{#key message.id} {#key message.id}
<ResponseMessage <ResponseMessage
{message} {message}
modelfiles={selectedModelfiles}
siblings={history.messages[message.parentId]?.childrenIds ?? []} siblings={history.messages[message.parentId]?.childrenIds ?? []}
isLastMessage={messageIdx + 1 === messages.length} isLastMessage={messageIdx + 1 === messages.length}
{readOnly} {readOnly}
...@@ -348,7 +343,6 @@ ...@@ -348,7 +343,6 @@
{chatId} {chatId}
parentMessage={history.messages[message.parentId]} parentMessage={history.messages[message.parentId]}
{messageIdx} {messageIdx}
{selectedModelfiles}
{updateChatMessages} {updateChatMessages}
{confirmEditResponseMessage} {confirmEditResponseMessage}
{rateMessage} {rateMessage}
......
@@ -4,7 +4,7 @@
 	import hljs from 'highlight.js';
 	import 'highlight.js/styles/github-dark.min.css';
 	import { loadPyodide } from 'pyodide';
-	import { tick } from 'svelte';
+	import { onMount, tick } from 'svelte';

 	import PyodideWorker from '$lib/workers/pyodide.worker?worker';

 	export let id = '';

@@ -12,6 +12,7 @@
 	export let lang = '';
 	export let code = '';

+	let highlightedCode = null;
 	let executing = false;

 	let stdout = null;

@@ -202,60 +203,60 @@ __builtins__.input = input`);
 		};
 	};

-	$: highlightedCode = code ? hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value : '';
+	$: if (code) {
+		highlightedCode = hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value || code;
+	}
 </script>
-{#if code}
-	<div class="mb-4" dir="ltr">
+<div class="mb-4" dir="ltr">
 	<div
 		class="flex justify-between bg-[#202123] text-white text-xs px-4 pt-1 pb-0.5 rounded-t-lg overflow-x-auto"
 	>
 		<div class="p-1">{@html lang}</div>

 		<div class="flex items-center">
 			{#if lang === 'python' || (lang === '' && checkPythonCode(code))}
 				{#if executing}
 					<div class="copy-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
 				{:else}
 					<button
 						class="copy-code-button bg-none border-none p-1"
 						on:click={() => {
 							executePython(code);
 						}}>Run</button
 					>
 				{/if}
 			{/if}
 			<button class="copy-code-button bg-none border-none p-1" on:click={copyCode}
 				>{copied ? 'Copied' : 'Copy Code'}</button
 			>
 		</div>
 	</div>

 	<pre
 		class=" hljs p-4 px-5 overflow-x-auto"
 		style="border-top-left-radius: 0px; border-top-right-radius: 0px; {(executing ||
 		stdout ||
 		stderr ||
 		result) &&
 		'border-bottom-left-radius: 0px; border-bottom-right-radius: 0px;'}"><code
 			class="language-{lang} rounded-t-none whitespace-pre">{@html highlightedCode || code}</code
 		></pre>

 	<div
 		id="plt-canvas-{id}"
 		class="bg-[#202123] text-white max-w-full overflow-x-auto scrollbar-hidden"
 	/>

 	{#if executing}
 		<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
 			<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
 			<div class="text-sm">Running...</div>
 		</div>
 	{:else if stdout || stderr || result}
 		<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
 			<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
 			<div class="text-sm">{stdout || stderr || result}</div>
 		</div>
 	{/if}
-	</div>
-{/if}
+</div>