"docs/source/vscode:/vscode.git/clone" did not exist on "7457aa67cb5c75132c38507080697b7cc7c4d9e6"
Commit 2aecd7d0 authored by Jonathan Rohde

Merge branch 'refs/heads/dev' into feat/sqlalchemy-instead-of-peewee

# Conflicts:
#	backend/requirements.txt
parents 5391f4c1 f3c1ff9e
......@@ -4,6 +4,7 @@ updates:
directory: '/backend'
schedule:
interval: weekly
target-branch: 'dev'
- package-ecosystem: 'github-actions'
directory: '/'
schedule:
......
......@@ -5,6 +5,23 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.3.7] - 2024-06-29
### Added
- **🌐 Enhanced Internationalization (i18n)**: Newly introduced Indonesian translation, and updated translations for Turkish, Chinese, and Catalan languages to improve user accessibility.
### Fixed
- **🕵️‍♂️ Browser Language Detection**: Corrected the issue where the application was not properly detecting and adapting to the browser's language settings.
- **🔐 OIDC Admin Role Assignment**: Fixed a bug where the admin role was not being assigned to the first user who signed up via OpenID Connect (OIDC).
- **💬 Chat/Completions Endpoint**: Resolved an issue where the chat/completions endpoint was non-functional when the stream option was set to False.
- **🚫 'WEBUI_AUTH' Configuration**: Addressed the problem where setting 'WEBUI_AUTH' to False was not being applied correctly.
### Changed
- **📦 Dependency Update**: Upgraded 'authlib' from version 1.3.0 to 1.3.1 to ensure better security and performance enhancements.
## [0.3.6] - 2024-06-27
### Added
......
......@@ -14,7 +14,6 @@ from fastapi import (
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from faster_whisper import WhisperModel
from pydantic import BaseModel
import uuid
......@@ -277,6 +276,8 @@ def transcribe(
f.close()
if app.state.config.STT_ENGINE == "":
from faster_whisper import WhisperModel
whisper_kwargs = {
"model_size_or_path": WHISPER_MODEL,
"device": whisper_device_type,
......
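The two audio hunks above move the `faster_whisper` import from module scope into the transcription handler, so the heavy model library is only loaded when the built-in STT engine is actually selected. A minimal sketch of the same deferred-import pattern (the function and config names here are illustrative, not the project's):

```python
def transcribe_with_local_engine(audio_path: str, stt_engine: str = ""):
    """Load faster_whisper lazily: importing it at module scope pulls in
    heavy native dependencies even for deployments that use a remote STT API."""
    if stt_engine == "":
        # Import deferred until the local engine is actually needed.
        from faster_whisper import WhisperModel

        model = WhisperModel("base", device="cpu", compute_type="int8")
        segments, _info = model.transcribe(audio_path)
        return " ".join(segment.text for segment in segments)
    raise NotImplementedError(f"remote engine '{stt_engine}' is handled elsewhere")
```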
......@@ -12,7 +12,6 @@ from fastapi import (
Form,
)
from fastapi.middleware.cors import CORSMiddleware
from faster_whisper import WhisperModel
from constants import ERROR_MESSAGES
from utils.utils import (
......
......@@ -153,7 +153,7 @@ async def cleanup_response(
await session.close()
async def post_streaming_url(url: str, payload: str):
async def post_streaming_url(url: str, payload: str, stream: bool = True):
r = None
try:
session = aiohttp.ClientSession(
......@@ -162,12 +162,20 @@ async def post_streaming_url(url: str, payload: str):
r = await session.post(url, data=payload)
r.raise_for_status()
return StreamingResponse(
r.content,
status_code=r.status,
headers=dict(r.headers),
background=BackgroundTask(cleanup_response, response=r, session=session),
)
if stream:
return StreamingResponse(
r.content,
status_code=r.status,
headers=dict(r.headers),
background=BackgroundTask(
cleanup_response, response=r, session=session
),
)
else:
res = await r.json()
await cleanup_response(r, session)
return res
except Exception as e:
error_detail = "Open WebUI: Server Connection Error"
if r is not None:
......@@ -963,7 +971,11 @@ async def generate_openai_chat_completion(
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
log.info(f"url: {url}")
return await post_streaming_url(f"{url}/v1/chat/completions", json.dumps(payload))
return await post_streaming_url(
f"{url}/v1/chat/completions",
json.dumps(payload),
stream=payload.get("stream", False),
)
@app.get("/v1/models")
......
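`post_streaming_url` now takes a `stream` flag: when the client sends `"stream": false`, the proxy awaits the full JSON body and releases the connection immediately instead of wrapping the open response in a `StreamingResponse`. A minimal sketch of that branch in plain `aiohttp` (names are illustrative; error handling is omitted):

```python
import aiohttp
from fastapi.responses import StreamingResponse
from starlette.background import BackgroundTask


async def _cleanup(response: aiohttp.ClientResponse, session: aiohttp.ClientSession):
    response.close()
    await session.close()


async def proxy_post(url: str, payload: str, stream: bool = True):
    session = aiohttp.ClientSession()
    response = await session.post(url, data=payload)
    response.raise_for_status()
    if stream:
        # Keep the upstream connection open and relay chunks as they arrive;
        # cleanup is deferred until the last chunk has been sent.
        return StreamingResponse(
            response.content,
            status_code=response.status,
            headers=dict(response.headers),
            background=BackgroundTask(_cleanup, response, session),
        )
    # stream=False: read the whole JSON body, then clean up eagerly.
    body = await response.json()
    await _cleanup(response, session)
    return body
```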
......@@ -48,8 +48,6 @@ import mimetypes
import uuid
import json
import sentence_transformers
from apps.webui.models.documents import (
Documents,
DocumentForm,
......@@ -190,6 +188,8 @@ def update_embedding_model(
update_model: bool = False,
):
if embedding_model and app.state.config.RAG_EMBEDDING_ENGINE == "":
import sentence_transformers
app.state.sentence_transformer_ef = sentence_transformers.SentenceTransformer(
get_model_path(embedding_model, update_model),
device=DEVICE_TYPE,
......@@ -204,6 +204,8 @@ def update_reranking_model(
update_model: bool = False,
):
if reranking_model:
import sentence_transformers
app.state.sentence_transformer_rf = sentence_transformers.CrossEncoder(
get_model_path(reranking_model, update_model),
device=DEVICE_TYPE,
......
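As with `faster_whisper`, `sentence_transformers` is now imported only when a local embedding or reranking model is actually configured. For context, a minimal sketch of what the two model classes being constructed here do (the model names are illustrative):

```python
from sentence_transformers import SentenceTransformer, CrossEncoder

# Bi-encoder: maps texts to fixed-size vectors for similarity search.
embedder = SentenceTransformer("all-MiniLM-L6-v2")
doc_vectors = embedder.encode(["first document", "second document"])

# Cross-encoder: scores (query, document) pairs directly; used for reranking.
reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
scores = reranker.predict([
    ("what is retrieval-augmented generation?", "first document"),
    ("what is retrieval-augmented generation?", "second document"),
])
print(doc_vectors.shape, scores)
```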
......@@ -442,8 +442,6 @@ from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.callbacks import Callbacks
from langchain_core.pydantic_v1 import Extra
from sentence_transformers import util
class RerankCompressor(BaseDocumentCompressor):
embedding_function: Any
......@@ -468,6 +466,8 @@ class RerankCompressor(BaseDocumentCompressor):
[(query, doc.page_content) for doc in documents]
)
else:
from sentence_transformers import util
query_embedding = self.embedding_function(query)
document_embedding = self.embedding_function(
[doc.page_content for doc in documents]
......
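In `RerankCompressor`, the fallback path (no cross-encoder configured) embeds the query and documents separately and compares them, with `sentence_transformers.util` likewise imported lazily. A minimal sketch of that cosine-similarity reranking path, assuming an embedding function that returns vectors:

```python
from typing import Callable, List

from sentence_transformers import util


def rerank_by_cosine(embed: Callable, query: str, documents: List[str]):
    # embed is e.g. SentenceTransformer(...).encode
    query_embedding = embed(query)
    document_embeddings = embed(documents)
    # util.cos_sim returns a (1, n_docs) tensor of cosine similarities.
    scores = util.cos_sim(query_embedding, document_embeddings)[0]
    return sorted(
        zip(documents, scores.tolist()), key=lambda pair: pair[1], reverse=True
    )
```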
......@@ -259,6 +259,9 @@ async def generate_function_chat_completion(form_data, user):
if isinstance(line, BaseModel):
line = line.model_dump_json()
line = f"data: {line}"
if isinstance(line, dict):
line = f"data: {json.dumps(line)}"
try:
line = line.decode("utf-8")
except:
......
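The function-calling stream previously framed only `BaseModel` chunks; plain `dict` chunks now get the same `data: ...` server-sent-events framing. A minimal sketch of normalizing mixed chunk types into SSE lines (the chunk shapes are illustrative):

```python
import json

from pydantic import BaseModel


def to_sse_line(line) -> str:
    """Normalize a stream chunk (BaseModel, dict, bytes, or str) to an SSE data line."""
    if isinstance(line, BaseModel):
        line = f"data: {line.model_dump_json()}"
    elif isinstance(line, dict):
        line = f"data: {json.dumps(line)}"
    elif isinstance(line, bytes):
        line = line.decode("utf-8")
    return line
```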
......@@ -674,6 +674,13 @@ ENABLE_SIGNUP = PersistentConfig(
else os.environ.get("ENABLE_SIGNUP", "True").lower() == "true"
),
)
DEFAULT_LOCALE = PersistentConfig(
"DEFAULT_LOCALE",
"ui.default_locale",
os.environ.get("DEFAULT_LOCALE", ""),
)
DEFAULT_MODELS = PersistentConfig(
"DEFAULT_MODELS", "ui.default_models", os.environ.get("DEFAULT_MODELS", None)
)
......
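`DEFAULT_LOCALE` becomes a `PersistentConfig` entry, so the UI locale can be set via the `DEFAULT_LOCALE` environment variable or persisted under the `ui.default_locale` config path, rather than being read ad hoc inside `/api/config`. A minimal sketch of the precedence such a wrapper implies, assuming the persisted config-file value overrides the environment variable (the helper below is illustrative, not the project's class):

```python
import os


def resolve_config(env_name: str, config_path: str, persisted: dict, default: str = ""):
    """Persisted value (e.g. from config.json) wins over the environment variable."""
    section, key = config_path.split(".", 1)
    if section in persisted and key in persisted[section]:
        return persisted[section][key]
    return os.environ.get(env_name, default)


locale = resolve_config("DEFAULT_LOCALE", "ui.default_locale", {"ui": {}}, default="")
```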
......@@ -104,6 +104,7 @@ from config import (
UPLOAD_DIR,
CACHE_DIR,
STATIC_DIR,
DEFAULT_LOCALE,
ENABLE_OPENAI_API,
ENABLE_OLLAMA_API,
ENABLE_MODEL_FILTER,
......@@ -633,6 +634,8 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
return StreamingResponse(
self.ollama_stream_wrapper(response.body_iterator, data_items),
)
return response
else:
return response
......@@ -1748,18 +1751,11 @@ async def update_pipeline_valves(
@app.get("/api/config")
async def get_app_config():
# Checking and Handling the Absence of 'ui' in CONFIG_DATA
default_locale = "en-US"
if "ui" in CONFIG_DATA:
default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
# The Rest of the Function Now Uses the Variables Defined Above
return {
"status": True,
"name": WEBUI_NAME,
"version": VERSION,
"default_locale": default_locale,
"default_locale": str(DEFAULT_LOCALE),
"default_models": webui_app.state.config.DEFAULT_MODELS,
"default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
"features": {
......@@ -1972,6 +1968,11 @@ async def oauth_callback(provider: str, request: Request, response: Response):
picture_url = ""
if not picture_url:
picture_url = "/user.png"
role = (
"admin"
if Users.get_num_users() == 0
else webui_app.state.config.DEFAULT_USER_ROLE
)
user = Auths.insert_new_auth(
email=email,
password=get_password_hash(
......@@ -1979,7 +1980,7 @@ async def oauth_callback(provider: str, request: Request, response: Response):
), # Random password, not used
name=user_data.get("name", "User"),
profile_image_url=picture_url,
role=webui_app.state.config.DEFAULT_USER_ROLE,
role=role,
oauth_sub=provider_sub,
)
......@@ -2006,7 +2007,7 @@ async def oauth_callback(provider: str, request: Request, response: Response):
# Set the cookie token
response.set_cookie(
key="token",
value=token,
value=jwt_token,
httponly=True, # Ensures the cookie is not accessible via JavaScript
)
......
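Two fixes land in `oauth_callback`: the first account ever created via OIDC is now promoted to admin, matching the behavior of password signups, and the auth cookie is set from the `jwt_token` value. A minimal sketch of the role-bootstrap rule, with a stand-in for the user count:

```python
def role_for_new_oauth_user(num_existing_users: int, default_role: str = "pending") -> str:
    # The very first user to sign up administers the instance;
    # everyone after that gets the configured default role.
    return "admin" if num_existing_users == 0 else default_role


assert role_for_new_oauth_user(0) == "admin"
assert role_for_new_oauth_user(3) == "pending"
```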
......@@ -6,11 +6,11 @@ python-multipart==0.0.9
Flask==3.0.3
Flask-Cors==4.0.1
python-socketio==5.11.2
python-socketio==5.11.3
python-jose==3.3.0
passlib[bcrypt]==1.7.4
requests==2.32.2
requests==2.32.3
aiohttp==3.9.5
sqlalchemy==2.0.30
alembic==1.13.1
......@@ -32,13 +32,13 @@ openai
anthropic
google-generativeai==0.5.4
langchain==0.2.0
langchain-community==0.2.0
langchain-chroma==0.1.1
langchain==0.2.6
langchain-community==0.2.6
langchain-chroma==0.1.2
fake-useragent==1.5.1
chromadb==0.5.0
sentence-transformers==2.7.0
chromadb==0.5.3
sentence-transformers==3.0.1
pypdf==4.2.0
docx2txt==0.8
python-pptx==0.6.23
......@@ -46,7 +46,7 @@ unstructured==0.14.0
Markdown==3.6
pypandoc==1.13
pandas==2.2.2
openpyxl==3.1.2
openpyxl==3.1.5
pyxlsb==1.0.10
xlrd==2.0.1
validators==0.28.1
......@@ -60,16 +60,16 @@ rank-bm25==0.2.2
faster-whisper==1.0.2
PyJWT[crypto]==2.8.0
authlib==1.3.0
authlib==1.3.1
black==24.4.2
langfuse==2.33.0
langfuse==2.36.2
youtube-transcript-api==0.6.2
pytube==15.0.0
extract_msg
pydub
duckduckgo-search~=6.1.5
duckduckgo-search~=6.1.7
## Tests
docker~=7.1.0
......
{
"name": "open-webui",
"version": "0.3.6",
"version": "0.3.7",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "open-webui",
"version": "0.3.6",
"version": "0.3.7",
"dependencies": {
"@codemirror/lang-javascript": "^6.2.2",
"@codemirror/lang-python": "^6.1.6",
......
{
"name": "open-webui",
"version": "0.3.6",
"version": "0.3.7",
"private": true,
"scripts": {
"dev": "npm run pyodide:fetch && vite dev --host",
......
......@@ -59,7 +59,7 @@ dependencies = [
"faster-whisper==1.0.2",
"PyJWT[crypto]==2.8.0",
"authlib==1.3.0",
"authlib==1.3.1",
"black==24.4.2",
"langfuse==2.33.0",
......
......@@ -100,64 +100,68 @@
class="flex snap-x snap-mandatory overflow-x-auto scrollbar-hidden"
id="responses-container-{parentMessage.id}"
>
{#each Object.keys(groupedMessages) as model}
{#if groupedMessagesIdx[model] !== undefined && groupedMessages[model].messages.length > 0}
<!-- svelte-ignore a11y-no-static-element-interactions -->
<!-- svelte-ignore a11y-click-events-have-key-events -->
<div
class=" snap-center min-w-80 w-full max-w-full m-1 border {history.messages[
currentMessageId
].model === model
? 'border-gray-100 dark:border-gray-850 border-[1.5px]'
: 'border-gray-50 dark:border-gray-850 '} transition p-5 rounded-3xl"
on:click={() => {
currentMessageId = groupedMessages[model].messages[groupedMessagesIdx[model]].id;
let messageId = groupedMessages[model].messages[groupedMessagesIdx[model]].id;
console.log(messageId);
let messageChildrenIds = history.messages[messageId].childrenIds;
while (messageChildrenIds.length !== 0) {
messageId = messageChildrenIds.at(-1);
messageChildrenIds = history.messages[messageId].childrenIds;
}
history.currentId = messageId;
dispatch('change');
}}
>
<ResponseMessage
message={groupedMessages[model].messages[groupedMessagesIdx[model]]}
siblings={groupedMessages[model].messages.map((m) => m.id)}
isLastMessage={true}
{updateChatMessages}
{confirmEditResponseMessage}
showPreviousMessage={() => showPreviousMessage(model)}
showNextMessage={() => showNextMessage(model)}
{readOnly}
{rateMessage}
{copyToClipboard}
{continueGeneration}
regenerateResponse={async (message) => {
regenerateResponse(message);
await tick();
groupedMessagesIdx[model] = groupedMessages[model].messages.length - 1;
{#key currentMessageId}
{#each Object.keys(groupedMessages) as model}
{#if groupedMessagesIdx[model] !== undefined && groupedMessages[model].messages.length > 0}
<!-- svelte-ignore a11y-no-static-element-interactions -->
<!-- svelte-ignore a11y-click-events-have-key-events -->
{@const message = groupedMessages[model].messages[groupedMessagesIdx[model]]}
<div
class=" snap-center min-w-80 w-full max-w-full m-1 border {history.messages[
currentMessageId
].model === model
? 'border-gray-100 dark:border-gray-800 border-[1.5px]'
: 'border-gray-50 dark:border-gray-850 '} transition p-5 rounded-3xl"
on:click={() => {
if (currentMessageId != message.id) {
currentMessageId = message.id;
let messageId = message.id;
console.log(messageId);
//
let messageChildrenIds = history.messages[messageId].childrenIds;
while (messageChildrenIds.length !== 0) {
messageId = messageChildrenIds.at(-1);
messageChildrenIds = history.messages[messageId].childrenIds;
}
history.currentId = messageId;
dispatch('change');
}
}}
on:save={async (e) => {
console.log('save', e);
const message = e.detail;
history.messages[message.id] = message;
await updateChatById(localStorage.token, chatId, {
messages: messages,
history: history
});
}}
/>
</div>
{/if}
{/each}
>
<ResponseMessage
message={groupedMessages[model].messages[groupedMessagesIdx[model]]}
siblings={groupedMessages[model].messages.map((m) => m.id)}
isLastMessage={true}
{updateChatMessages}
{confirmEditResponseMessage}
showPreviousMessage={() => showPreviousMessage(model)}
showNextMessage={() => showNextMessage(model)}
{readOnly}
{rateMessage}
{copyToClipboard}
{continueGeneration}
regenerateResponse={async (message) => {
regenerateResponse(message);
await tick();
groupedMessagesIdx[model] = groupedMessages[model].messages.length - 1;
}}
on:save={async (e) => {
console.log('save', e);
const message = e.detail;
history.messages[message.id] = message;
await updateChatById(localStorage.token, chatId, {
messages: messages,
history: history
});
}}
/>
</div>
{/if}
{/each}
{/key}
</div>
</div>
......@@ -27,61 +27,73 @@
}
let codeEditor;
let boilerplate = `from pydantic import BaseModel
let boilerplate = `"""
title: Example Filter
author: open-webui
author_url: https://github.com/open-webui
funding_url: https://github.com/open-webui
version: 0.1
"""
from pydantic import BaseModel, Field
from typing import Optional
class Filter:
class Valves(BaseModel):
max_turns: int = 4
priority: int = Field(
default=0, description="Priority level for the filter operations."
)
max_turns: int = Field(
default=8, description="Maximum allowable conversation turns for a user."
)
pass
class UserValves(BaseModel):
max_turns: int = Field(
default=4, description="Maximum allowable conversation turns for a user."
)
pass
def __init__(self):
# Indicates custom file handling logic. This flag helps disengage default routines in favor of custom
# implementations, informing the WebUI to defer file-related operations to designated methods within this class.
# Alternatively, you can remove the files directly from the body in the inlet hook
self.file_handler = True
# self.file_handler = True
# Initialize 'valves' with specific configurations. Using 'Valves' instance helps encapsulate settings,
# which ensures settings are managed cohesively and not confused with operational flags like 'file_handler'.
self.valves = self.Valves(**{"max_turns": 2})
self.valves = self.Valves()
pass
def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
def inlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
# Modify the request body or validate it before processing by the chat completion API.
# This function is the pre-processor for the API where various checks on the input can be performed.
# It can also modify the request before sending it to the API.
print(f"inlet:{__name__}")
print(f"inlet:body:{body}")
print(f"inlet:user:{user}")
print(f"inlet:user:{__user__}")
if user.get("role", "admin") in ["user", "admin"]:
if __user__.get("role", "admin") in ["user", "admin"]:
messages = body.get("messages", [])
if len(messages) > self.valves.max_turns:
max_turns = min(__user__["valves"].max_turns, self.valves.max_turns)
if len(messages) > max_turns:
raise Exception(
f"Conversation turn limit exceeded. Max turns: {self.valves.max_turns}"
f"Conversation turn limit exceeded. Max turns: {max_turns}"
)
return body
def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
def outlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
# Modify or analyze the response body after processing by the API.
# This function is the post-processor for the API, which can be used to modify the response
# or perform additional checks and analytics.
print(f"outlet:{__name__}")
print(f"outlet:body:{body}")
print(f"outlet:user:{user}")
messages = [
{
**message,
"content": f"{message['content']} - @@Modified from Filter Outlet",
}
for message in body.get("messages", [])
]
return {"messages": messages}
print(f"outlet:user:{__user__}")
return body
`;
const _boilerplate = `from pydantic import BaseModel
......
......@@ -63,6 +63,10 @@
"code": "hr-HR",
"title": "Croatian (Hrvatski)"
},
{
"code": "id-ID",
"title": "Indonesian (Bahasa Indonesia)"
},
{
"code": "it-IT",
"title": "Italian (Italiano)"
......