Unverified commit a02ba52d authored by Que Nguyen, committed by GitHub

Merge branch 'dev' into searxng

parents 7b5f434a 1275371e
@@ -37,7 +37,7 @@ Open WebUI is an [extensible](https://github.com/open-webui/pipelines), feature-
- 📚 **Local RAG Integration**: Dive into the future of chat interactions with groundbreaking Retrieval Augmented Generation (RAG) support. This feature seamlessly integrates document interactions into your chat experience. You can load documents directly into the chat or add files to your document library, effortlessly accessing them using the `#` command before a query.
- 🔍 **Web Search for RAG**: Perform web searches using providers like `SearXNG`, `Google PSE`, `Brave Search`, `serpstack`, `serper`, `Serply`, `DuckDuckGo`, and `TavilySearch`, and inject the results directly into your chat experience.
- 🌐 **Web Browsing Capability**: Seamlessly integrate websites into your chat experience using the `#` command followed by a URL. This feature allows you to incorporate web content directly into your conversations, enhancing the richness and depth of your interactions.
...
@@ -18,6 +18,10 @@ If you're experiencing connection issues, it’s often due to the WebUI docker c
docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```

### Error on Slow Responses for Ollama

Open WebUI has a default timeout of 5 minutes for Ollama to finish generating a response. If needed, this can be adjusted via the environment variable `AIOHTTP_CLIENT_TIMEOUT`, which sets the timeout in seconds.
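
For example, to raise the timeout to 10 minutes, pass the variable when starting the container (a minimal variant of the run command above; the value is illustrative):

```
docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 -e AIOHTTP_CLIENT_TIMEOUT=600 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```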
### General Connection Errors

**Ensure Ollama Version is Up-to-Date**: Always start by checking that you have the latest version of Ollama. Visit [Ollama's official site](https://ollama.com/) for the latest updates.
...
@@ -46,6 +46,7 @@ from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    ENABLE_OLLAMA_API,
    AIOHTTP_CLIENT_TIMEOUT,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    UPLOAD_DIR,
@@ -154,7 +155,9 @@ async def cleanup_response(
async def post_streaming_url(url: str, payload: str):
    r = None
    try:
        session = aiohttp.ClientSession(
            trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
        )
        r = await session.post(url, data=payload)
        r.raise_for_status()
@@ -751,6 +754,14 @@ async def generate_chat_completion(
        if model_info.params.get("num_ctx", None):
            payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)

        if model_info.params.get("num_batch", None):
            payload["options"]["num_batch"] = model_info.params.get(
                "num_batch", None
            )

        if model_info.params.get("num_keep", None):
            payload["options"]["num_keep"] = model_info.params.get("num_keep", None)

        if model_info.params.get("repeat_last_n", None):
            payload["options"]["repeat_last_n"] = model_info.params.get(
                "repeat_last_n", None
            )
...
@@ -73,6 +73,7 @@ from apps.rag.search.serper import search_serper
from apps.rag.search.serpstack import search_serpstack
from apps.rag.search.serply import search_serply
from apps.rag.search.duckduckgo import search_duckduckgo
from apps.rag.search.tavily import search_tavily

from utils.misc import (
    calculate_sha256,
...
@@ -120,6 +121,7 @@ from config import (
    SERPSTACK_HTTPS,
    SERPER_API_KEY,
    SERPLY_API_KEY,
    TAVILY_API_KEY,
    RAG_WEB_SEARCH_RESULT_COUNT,
    RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
    RAG_EMBEDDING_OPENAI_BATCH_SIZE,
...@@ -174,6 +176,7 @@ app.state.config.SERPSTACK_API_KEY = SERPSTACK_API_KEY ...@@ -174,6 +176,7 @@ app.state.config.SERPSTACK_API_KEY = SERPSTACK_API_KEY
app.state.config.SERPSTACK_HTTPS = SERPSTACK_HTTPS app.state.config.SERPSTACK_HTTPS = SERPSTACK_HTTPS
app.state.config.SERPER_API_KEY = SERPER_API_KEY app.state.config.SERPER_API_KEY = SERPER_API_KEY
app.state.config.SERPLY_API_KEY = SERPLY_API_KEY app.state.config.SERPLY_API_KEY = SERPLY_API_KEY
app.state.config.TAVILY_API_KEY = TAVILY_API_KEY
app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = RAG_WEB_SEARCH_RESULT_COUNT app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = RAG_WEB_SEARCH_RESULT_COUNT
app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_REQUESTS app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_REQUESTS
@@ -402,6 +405,7 @@ async def get_rag_config(user=Depends(get_admin_user)):
            "serpstack_https": app.state.config.SERPSTACK_HTTPS,
            "serper_api_key": app.state.config.SERPER_API_KEY,
            "serply_api_key": app.state.config.SERPLY_API_KEY,
            "tavily_api_key": app.state.config.TAVILY_API_KEY,
            "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
            "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
        },
@@ -430,6 +434,7 @@ class WebSearchConfig(BaseModel):
    serpstack_https: Optional[bool] = None
    serper_api_key: Optional[str] = None
    serply_api_key: Optional[str] = None
    tavily_api_key: Optional[str] = None
    result_count: Optional[int] = None
    concurrent_requests: Optional[int] = None
@@ -481,6 +486,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
        app.state.config.SERPSTACK_HTTPS = form_data.web.search.serpstack_https
        app.state.config.SERPER_API_KEY = form_data.web.search.serper_api_key
        app.state.config.SERPLY_API_KEY = form_data.web.search.serply_api_key
        app.state.config.TAVILY_API_KEY = form_data.web.search.tavily_api_key
        app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = form_data.web.search.result_count
        app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = (
            form_data.web.search.concurrent_requests
@@ -510,6 +516,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
            "serpstack_https": app.state.config.SERPSTACK_HTTPS,
            "serper_api_key": app.state.config.SERPER_API_KEY,
            "serply_api_key": app.state.config.SERPLY_API_KEY,
            "tavily_api_key": app.state.config.TAVILY_API_KEY,
            "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
            "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
        },
@@ -758,7 +765,7 @@ def search_web(engine: str, query: str) -> list[SearchResult]:
    - SERPSTACK_API_KEY
    - SERPER_API_KEY
    - SERPLY_API_KEY
    - TAVILY_API_KEY

    Args:
        query (str): The query to search for
    """
@@ -833,6 +840,15 @@ def search_web(engine: str, query: str) -> list[SearchResult]:
            raise Exception("No SERPLY_API_KEY found in environment variables")
    elif engine == "duckduckgo":
        return search_duckduckgo(query, app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, app.state.config.RAG_WEB_SEARCH_WHITE_LIST_DOMAINS)
    elif engine == "tavily":
        if app.state.config.TAVILY_API_KEY:
            return search_tavily(
                app.state.config.TAVILY_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
            )
        else:
            raise Exception("No TAVILY_API_KEY found in environment variables")
    else:
        raise Exception("No search engine API key found in environment variables")
...
import logging

import requests

from apps.rag.search.main import SearchResult
from config import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


def search_tavily(api_key: str, query: str, count: int) -> list[SearchResult]:
    """Search using Tavily's Search API and return the results as a list of SearchResult objects.

    Args:
        api_key (str): A Tavily Search API key
        query (str): The query to search for

    Returns:
        List[SearchResult]: A list of search results
    """
    url = "https://api.tavily.com/search"
    data = {"query": query, "api_key": api_key}

    response = requests.post(url, json=data)
    response.raise_for_status()

    json_response = response.json()

    raw_search_results = json_response.get("results", [])

    return [
        SearchResult(
            link=result["url"],
            title=result.get("title", ""),
            snippet=result.get("content"),
        )
        for result in raw_search_results[:count]
    ]
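
For reference, a minimal sketch of calling this helper directly; the API key and query below are placeholders, and inside Open WebUI the function is normally reached through `search_web(engine="tavily", ...)`:

```python
# Hypothetical standalone usage of search_tavily; values are placeholders.
results = search_tavily(api_key="tvly-xxxxxxxx", query="open source chat interfaces", count=3)
for result in results:
    print(result.link, result.title)
```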
@@ -65,6 +65,20 @@ class MemoriesTable:
        else:
            return None

    def update_memory_by_id(
        self,
        id: str,
        content: str,
    ) -> Optional[MemoryModel]:
        try:
            memory = Memory.get(Memory.id == id)
            memory.content = content
            memory.updated_at = int(time.time())
            memory.save()
            return MemoryModel(**model_to_dict(memory))
        except:
            return None

    def get_memories(self) -> List[MemoryModel]:
        try:
            memories = Memory.select()
...
@@ -44,6 +44,10 @@ class AddMemoryForm(BaseModel):
    content: str


class MemoryUpdateModel(BaseModel):
    content: Optional[str] = None


@router.post("/add", response_model=Optional[MemoryModel])
async def add_memory(
    request: Request, form_data: AddMemoryForm, user=Depends(get_verified_user)
...
@@ -62,6 +66,34 @@ async def add_memory(
    return memory


@router.post("/{memory_id}/update", response_model=Optional[MemoryModel])
async def update_memory_by_id(
    memory_id: str,
    request: Request,
    form_data: MemoryUpdateModel,
    user=Depends(get_verified_user),
):
    memory = Memories.update_memory_by_id(memory_id, form_data.content)
    if memory is None:
        raise HTTPException(status_code=404, detail="Memory not found")

    if form_data.content is not None:
        memory_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
        collection = CHROMA_CLIENT.get_or_create_collection(
            name=f"user-memory-{user.id}"
        )
        collection.upsert(
            documents=[form_data.content],
            ids=[memory.id],
            embeddings=[memory_embedding],
            metadatas=[
                {"created_at": memory.created_at, "updated_at": memory.updated_at}
            ],
        )

    return memory
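
For reference, the new endpoint can be exercised directly over HTTP; a minimal sketch assuming the default `/api/v1` API base path (the URL, token, and memory ID are placeholders):

```python
import requests

# Hypothetical values; adjust to your deployment.
base_url = "http://localhost:3000/api/v1"
token = "YOUR_API_TOKEN"
memory_id = "MEMORY_ID"

# POST /memories/{memory_id}/update with the new content
response = requests.post(
    f"{base_url}/memories/{memory_id}/update",
    headers={"Authorization": f"Bearer {token}"},
    json={"content": "Updated memory text"},
)
response.raise_for_status()
print(response.json())  # the updated MemoryModel on success
```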
############################
# QueryMemory
############################
...
@@ -425,6 +425,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
)
OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")

AIOHTTP_CLIENT_TIMEOUT = int(os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "300"))

K8S_FLAG = os.environ.get("K8S_FLAG", "")
USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
@@ -951,6 +952,11 @@ SERPLY_API_KEY = PersistentConfig(
    os.getenv("SERPLY_API_KEY", ""),
)

TAVILY_API_KEY = PersistentConfig(
    "TAVILY_API_KEY",
    "rag.web.search.tavily_api_key",
    os.getenv("TAVILY_API_KEY", ""),
)

RAG_WEB_SEARCH_RESULT_COUNT = PersistentConfig(
    "RAG_WEB_SEARCH_RESULT_COUNT",
...
@@ -494,6 +494,9 @@ def filter_pipeline(payload, user):
    if "title" in payload:
        del payload["title"]

    if "task" in payload:
        del payload["task"]

    return payload
...
@@ -835,6 +838,71 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
        "task": True,
    }

    print(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**payload), user=user
        )
    else:
        return await generate_openai_chat_completion(payload, user=user)

@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
print("generate_emoji")
model_id = form_data["model"]
if model_id not in app.state.MODELS:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Model not found",
)
# Check if the user has a custom task model
# If the user has a custom task model, use that model
if app.state.MODELS[model_id]["owned_by"] == "ollama":
if app.state.config.TASK_MODEL:
task_model_id = app.state.config.TASK_MODEL
if task_model_id in app.state.MODELS:
model_id = task_model_id
else:
if app.state.config.TASK_MODEL_EXTERNAL:
task_model_id = app.state.config.TASK_MODEL_EXTERNAL
if task_model_id in app.state.MODELS:
model_id = task_model_id
print(model_id)
model = app.state.MODELS[model_id]
template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
Message: """{{prompt}}"""
'''
content = title_generation_template(
template, form_data["prompt"], user.model_dump()
)
payload = {
"model": model_id,
"messages": [{"role": "user", "content": content}],
"stream": False,
"max_tokens": 4,
"chat_id": form_data.get("chat_id", None),
"task": True,
} }
print(payload) print(payload)
......
@@ -28,6 +28,10 @@ math {
	@apply rounded-lg;
}

.markdown a {
	@apply underline;
}

ol > li {
	counter-increment: list-number;
	display: block;
...
@@ -205,6 +205,54 @@ export const generateTitle = async (
	return res?.choices[0]?.message?.content.replace(/["']/g, '') ?? 'New Chat';
};

export const generateEmoji = async (
	token: string = '',
	model: string,
	prompt: string,
	chat_id?: string
) => {
	let error = null;

	const res = await fetch(`${WEBUI_BASE_URL}/api/task/emoji/completions`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			Authorization: `Bearer ${token}`
		},
		body: JSON.stringify({
			model: model,
			prompt: prompt,
			...(chat_id && { chat_id: chat_id })
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			console.log(err);
			if ('detail' in err) {
				error = err.detail;
			}
			return null;
		});

	if (error) {
		throw error;
	}

	const response = res?.choices[0]?.message?.content.replace(/["']/g, '') ?? null;

	if (response) {
		if (/\p{Extended_Pictographic}/u.test(response)) {
			return response.match(/\p{Extended_Pictographic}/gu)[0];
		}
	}

	return null;
};
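
A quick usage sketch for this client helper; the model name and prompt are placeholders:

```typescript
// Hypothetical call; resolves to the first emoji in the completion, or null.
const emoji = await generateEmoji(localStorage.token, 'llama3', 'I just passed my exam!');
if (emoji) {
	console.log('Speaker expression:', emoji);
}
```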
export const generateSearchQuery = async (
	token: string = '',
	model: string,
...
@@ -3,7 +3,7 @@ import { WEBUI_API_BASE_URL } from '$lib/constants';
export const getMemories = async (token: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/`, {
		method: 'GET',
		headers: {
			Accept: 'application/json',
...
@@ -59,6 +59,37 @@ export const addNewMemory = async (token: string, content: string) => {
	return res;
};

export const updateMemoryById = async (token: string, id: string, content: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/${id}/update`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		},
		body: JSON.stringify({
			content: content
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			error = err.detail;
			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const queryMemory = async (token: string, content: string) => {
	let error = null;
...
@@ -13,6 +13,8 @@
		getRAGConfig,
		updateRAGConfig
	} from '$lib/apis/rag';

	import ResetUploadDirConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
	import ResetVectorDBConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';

	import { documents, models } from '$lib/stores';
	import { onMount, getContext } from 'svelte';
...
@@ -213,6 +215,34 @@
	});
</script>

<ResetUploadDirConfirmDialog
	bind:show={showResetUploadDirConfirm}
	on:confirm={() => {
		const res = resetUploadDir(localStorage.token).catch((error) => {
			toast.error(error);
			return null;
		});

		if (res) {
			toast.success($i18n.t('Success'));
		}
	}}
/>

<ResetVectorDBConfirmDialog
	bind:show={showResetConfirm}
	on:confirm={() => {
		const res = resetVectorDB(localStorage.token).catch((error) => {
			toast.error(error);
			return null;
		});

		if (res) {
			toast.success($i18n.t('Success'));
		}
	}}
/>

<form
	class="flex flex-col h-full justify-between space-y-3 text-sm"
	on:submit|preventDefault={() => {
@@ -640,199 +670,56 @@
	<hr class=" dark:border-gray-850" />

	<div>
		<button
			class=" flex rounded-xl py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
			on:click={() => {
				showResetUploadDirConfirm = true;
			}}
			type="button"
		>
			<div class=" self-center mr-3">
				<svg
					xmlns="http://www.w3.org/2000/svg"
					viewBox="0 0 24 24"
					fill="currentColor"
					class="size-4"
				>
					<path
						fill-rule="evenodd"
						d="M5.625 1.5H9a3.75 3.75 0 0 1 3.75 3.75v1.875c0 1.036.84 1.875 1.875 1.875H16.5a3.75 3.75 0 0 1 3.75 3.75v7.875c0 1.035-.84 1.875-1.875 1.875H5.625a1.875 1.875 0 0 1-1.875-1.875V3.375c0-1.036.84-1.875 1.875-1.875ZM9.75 14.25a.75.75 0 0 0 0 1.5H15a.75.75 0 0 0 0-1.5H9.75Z"
						clip-rule="evenodd"
					/>
					<path
						d="M14.25 5.25a5.23 5.23 0 0 0-1.279-3.434 9.768 9.768 0 0 1 6.963 6.963A5.23 5.23 0 0 0 16.5 7.5h-1.875a.375.375 0 0 1-.375-.375V5.25Z"
					/>
				</svg>
			</div>
			<div class=" self-center text-sm font-medium">{$i18n.t('Reset Upload Directory')}</div>
		</button>

		<button
			class=" flex rounded-xl py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
			on:click={() => {
				showResetConfirm = true;
			}}
			type="button"
		>
			<div class=" self-center mr-3">
				<svg
					xmlns="http://www.w3.org/2000/svg"
					viewBox="0 0 16 16"
					fill="currentColor"
					class="w-4 h-4"
				>
					<path
						fill-rule="evenodd"
						d="M3.5 2A1.5 1.5 0 0 0 2 3.5v9A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 12.5 4H9.621a1.5 1.5 0 0 1-1.06-.44L7.439 2.44A1.5 1.5 0 0 0 6.38 2H3.5Zm6.75 7.75a.75.75 0 0 0 0-1.5h-4.5a.75.75 0 0 0 0 1.5h4.5Z"
						clip-rule="evenodd"
					/>
				</svg>
			</div>
			<div class=" self-center text-sm font-medium">{$i18n.t('Reset Vector Storage')}</div>
		</button>
	</div>
</div>

<div class="flex justify-end pt-3 text-sm font-medium">
...
<script lang="ts"> <script lang="ts">
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { onMount, getContext } from 'svelte';
import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
import { WEBUI_NAME, models, MODEL_DOWNLOAD_POOL, user, config } from '$lib/stores';
import { splitStream } from '$lib/utils';
import { import {
createModel, createModel,
...@@ -11,15 +16,11 @@ ...@@ -11,15 +16,11 @@
uploadModel, uploadModel,
getOllamaConfig getOllamaConfig
} from '$lib/apis/ollama'; } from '$lib/apis/ollama';
import { getModels as _getModels } from '$lib/apis';
import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
import { WEBUI_NAME, models, MODEL_DOWNLOAD_POOL, user, config } from '$lib/stores';
import { splitStream } from '$lib/utils';
import { onMount, getContext } from 'svelte';
import Tooltip from '$lib/components/common/Tooltip.svelte'; import Tooltip from '$lib/components/common/Tooltip.svelte';
import Spinner from '$lib/components/common/Spinner.svelte'; import Spinner from '$lib/components/common/Spinner.svelte';
import { getModels as _getModels } from '$lib/apis'; import ModelDeleteConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
...@@ -29,6 +30,8 @@ ...@@ -29,6 +30,8 @@
let modelUploadInputElement: HTMLInputElement; let modelUploadInputElement: HTMLInputElement;
let showModelDeleteConfirm = false;
// Models // Models
let ollamaEnabled = null; let ollamaEnabled = null;
...@@ -549,6 +552,13 @@ ...@@ -549,6 +552,13 @@
}); });
</script> </script>
<ModelDeleteConfirmDialog
bind:show={showModelDeleteConfirm}
on:confirm={() => {
deleteModelHandler();
}}
/>
<div class="flex flex-col h-full justify-between text-sm"> <div class="flex flex-col h-full justify-between text-sm">
<div class=" space-y-3 overflow-y-scroll scrollbar-hidden h-full"> <div class=" space-y-3 overflow-y-scroll scrollbar-hidden h-full">
{#if ollamaEnabled} {#if ollamaEnabled}
...@@ -763,7 +773,7 @@ ...@@ -763,7 +773,7 @@
<button <button
class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition" class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition"
on:click={() => { on:click={() => {
deleteModelHandler(); showModelDeleteConfirm = true;
}} }}
> >
<svg <svg
......
@@ -18,7 +18,8 @@
		'serpstack',
		'serper',
		'serply',
		'duckduckgo',
		'tavily'
	];

	let youtubeLanguage = 'en';
...
@@ -214,6 +215,24 @@
							</div>
						</div>
					</div>
				{:else if webConfig.search.engine === 'tavily'}
					<div>
						<div class=" self-center text-xs font-medium mb-1">
							{$i18n.t('Tavily API Key')}
						</div>

						<div class="flex w-full">
							<div class="flex-1">
								<input
									class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
									type="text"
									placeholder={$i18n.t('Enter Tavily API Key')}
									bind:value={webConfig.search.tavily_api_key}
									autocomplete="off"
								/>
							</div>
						</div>
					</div>
				{/if}
			</div>
		{/if}
...
@@ -30,6 +30,7 @@
	import {
		convertMessagesToHistory,
		copyToClipboard,
		extractSentencesForAudio,
		promptTemplate,
		splitStream
	} from '$lib/utils';
...
@@ -64,6 +65,8 @@
	export let chatIdProp = '';

	let loaded = false;

	const eventTarget = new EventTarget();

	let stopResponseFlag = false;
	let autoScroll = true;
	let processing = '';
...
@@ -108,7 +111,8 @@
	$: if (chatIdProp) {
		(async () => {
			console.log(chatIdProp);
			if (chatIdProp && (await loadChat())) {
				await tick();
				loaded = true;
...
@@ -123,7 +127,11 @@
	onMount(async () => {
		if (!$chatId) {
			chatId.subscribe(async (value) => {
				if (!value) {
					await initNewChat();
				}
			});
		} else {
			if (!($settings.saveChatHistory ?? true)) {
				await goto('/');
...
@@ -300,7 +308,7 @@
	// Chat functions
	//////////////////////////

	const submitPrompt = async (userPrompt, { _raw = false } = {}) => {
		let _responses = [];
		console.log('submitPrompt', $chatId);
...
@@ -344,7 +352,6 @@
			parentId: messages.length !== 0 ? messages.at(-1).id : null,
			childrenIds: [],
			role: 'user',
			content: userPrompt,
			files: _files.length > 0 ? _files : undefined,
			timestamp: Math.floor(Date.now() / 1000), // Unix epoch
...
@@ -362,15 +369,13 @@
			// Wait until history/message have been updated
			await tick();

			_responses = await sendPrompt(userPrompt, userMessageId, { newChat: true });
		}

		return _responses;
	};

	const sendPrompt = async (prompt, parentId, { modelId = null, newChat = false } = {}) => {
		let _responses = [];

		// If modelId is provided, use it, else use selected model
...
@@ -490,7 +495,6 @@
				responseMessage.userContext = userContext;

				const chatEventEmitter = await getChatEventEmitter(model.id, _chatId);
				if (webSearchEnabled) {
					await getWebSearchResults(model.id, parentId, responseMessageId);
				}
...
@@ -503,8 +507,6 @@
				}

				_responses.push(_response);
				if (chatEventEmitter) clearInterval(chatEventEmitter);
			} else {
				toast.error($i18n.t(`Model {{modelId}} not found`, { modelId }));
...
@@ -513,88 +515,9 @@
		);

		await chats.set(await getChatList(localStorage.token));

		return _responses;
	};
	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
		let _response = null;
...
@@ -676,6 +599,16 @@
			array.findIndex((i) => JSON.stringify(i) === JSON.stringify(item)) === index
		);

		eventTarget.dispatchEvent(
			new CustomEvent('chat:start', {
				detail: {
					id: responseMessageId
				}
			})
		);
		await tick();

		const [res, controller] = await generateChatCompletion(localStorage.token, {
			model: model.id,
			messages: messagesBody,
...
@@ -745,6 +678,23 @@
						continue;
					} else {
						responseMessage.content += data.message.content;

						const sentences = extractSentencesForAudio(responseMessage.content);
						sentences.pop();

						// dispatch only last sentence and make sure it hasn't been dispatched before
						if (
							sentences.length > 0 &&
							sentences[sentences.length - 1] !== responseMessage.lastSentence
						) {
							responseMessage.lastSentence = sentences[sentences.length - 1];
							eventTarget.dispatchEvent(
								new CustomEvent('chat', {
									detail: { id: responseMessageId, content: sentences[sentences.length - 1] }
								})
							);
						}

						messages = messages;
					}
				} else {
...
@@ -771,21 +721,13 @@
			messages = messages;

			if ($settings.notificationEnabled && !document.hasFocus()) {
				const notification = new Notification(`${model.id}`, {
					body: responseMessage.content,
					icon: `${WEBUI_BASE_URL}/static/favicon.png`
				});
			}

			if ($settings?.responseAutoCopy ?? false) {
				copyToClipboard(responseMessage.content);
			}
...
@@ -847,6 +789,23 @@
		stopResponseFlag = false;
		await tick();

		let lastSentence = extractSentencesForAudio(responseMessage.content)?.at(-1) ?? '';
		if (lastSentence) {
			eventTarget.dispatchEvent(
				new CustomEvent('chat', {
					detail: { id: responseMessageId, content: lastSentence }
				})
			);
		}
		eventTarget.dispatchEvent(
			new CustomEvent('chat:finish', {
				detail: {
					id: responseMessageId,
					content: responseMessage.content
				}
			})
		);

		if (autoScroll) {
			scrollToBottom();
		}
...
@@ -887,6 +846,15 @@
		scrollToBottom();

		eventTarget.dispatchEvent(
			new CustomEvent('chat:start', {
				detail: {
					id: responseMessageId
				}
			})
		);
		await tick();

		try {
			const [res, controller] = await generateOpenAIChatCompletion(
				localStorage.token,
@@ -1007,6 +975,23 @@
						continue;
					} else {
						responseMessage.content += value;

						const sentences = extractSentencesForAudio(responseMessage.content);
						sentences.pop();

						// dispatch only last sentence and make sure it hasn't been dispatched before
						if (
							sentences.length > 0 &&
							sentences[sentences.length - 1] !== responseMessage.lastSentence
						) {
							responseMessage.lastSentence = sentences[sentences.length - 1];
							eventTarget.dispatchEvent(
								new CustomEvent('chat', {
									detail: { id: responseMessageId, content: sentences[sentences.length - 1] }
								})
							);
						}

						messages = messages;
					}
...
@@ -1057,6 +1042,24 @@
		stopResponseFlag = false;
		await tick();

		let lastSentence = extractSentencesForAudio(responseMessage.content)?.at(-1) ?? '';
		if (lastSentence) {
			eventTarget.dispatchEvent(
				new CustomEvent('chat', {
					detail: { id: responseMessageId, content: lastSentence }
				})
			);
		}
		eventTarget.dispatchEvent(
			new CustomEvent('chat:finish', {
				detail: {
					id: responseMessageId,
					content: responseMessage.content
				}
			})
		);

		if (autoScroll) {
			scrollToBottom();
		}
...
@@ -1123,9 +1126,12 @@
		let userPrompt = userMessage.content;

		if ((userMessage?.models ?? [...selectedModels]).length == 1) {
			// If user message has only one model selected, sendPrompt automatically selects it for regeneration
			await sendPrompt(userPrompt, userMessage.id);
		} else {
			// If there are multiple models selected, use the model of the response message for regeneration
			// e.g. many model chat
			await sendPrompt(userPrompt, userMessage.id, { modelId: message.model });
		}
	};
...
@@ -1191,6 +1197,84 @@
		}
	};
	const getWebSearchResults = async (model: string, parentId: string, responseId: string) => {
		const responseMessage = history.messages[responseId];

		responseMessage.statusHistory = [
			{
				done: false,
				action: 'web_search',
				description: $i18n.t('Generating search query')
			}
		];
		messages = messages;

		const prompt = history.messages[parentId].content;
		let searchQuery = await generateSearchQuery(localStorage.token, model, messages, prompt).catch(
			(error) => {
				console.log(error);
				return prompt;
			}
		);

		if (!searchQuery) {
			toast.warning($i18n.t('No search query generated'));
			responseMessage.statusHistory.push({
				done: true,
				error: true,
				action: 'web_search',
				description: 'No search query generated'
			});
			messages = messages;
		}

		responseMessage.statusHistory.push({
			done: false,
			action: 'web_search',
			description: $i18n.t(`Searching "{{searchQuery}}"`, { searchQuery })
		});
		messages = messages;

		const results = await runWebSearch(localStorage.token, searchQuery).catch((error) => {
			console.log(error);
			toast.error(error);

			return null;
		});

		if (results) {
			responseMessage.statusHistory.push({
				done: true,
				action: 'web_search',
				description: $i18n.t('Searched {{count}} sites', { count: results.filenames.length }),
				query: searchQuery,
				urls: results.filenames
			});

			if ((responseMessage?.files ?? undefined) === undefined) {
				responseMessage.files = [];
			}

			responseMessage.files.push({
				collection_name: results.collection_name,
				name: searchQuery,
				type: 'web_search_results',
				urls: results.filenames
			});
			messages = messages;
		} else {
			responseMessage.statusHistory.push({
				done: true,
				error: true,
				action: 'web_search',
				description: 'No search results found'
			});
			messages = messages;
		}
	};
	const getTags = async () => {
		return await getTagsById(localStorage.token, $chatId).catch(async (error) => {
			return [];
...
@@ -1206,7 +1290,18 @@
	</title>
</svelte:head>

<audio id="audioElement" src="" style="display: none;" />

{#if $showCallOverlay}
	<CallOverlay
		{submitPrompt}
		{stopResponse}
		bind:files
		modelId={selectedModelIds?.at(0) ?? null}
		chatId={$chatId}
		{eventTarget}
	/>
{/if}

{#if !chatIdProp || (loaded && chatIdProp)}
	<div
...
@@ -348,7 +348,6 @@
<Models
	bind:this={modelsElement}
	bind:prompt
	bind:chatInputPlaceholder
	{messages}
	on:select={(e) => {
...
@@ -467,7 +466,7 @@
		document.getElementById('chat-textarea')?.focus();

		if ($settings?.speechAutoSend ?? false) {
			submitPrompt(prompt);
		}
	}}
/>
...
@@ -476,7 +475,7 @@
	class="w-full flex gap-1.5"
	on:submit|preventDefault={() => {
		// check if selectedModels support image input
		submitPrompt(prompt);
	}}
>
	<div
...
@@ -718,7 +717,7 @@
				// Submit the prompt when Enter key is pressed
				if (prompt !== '' && e.key === 'Enter' && !e.shiftKey) {
					submitPrompt(prompt);
				}
			}
		}}
...
@@ -2,40 +2,238 @@
	import { config, settings, showCallOverlay } from '$lib/stores';
	import { onMount, tick, getContext } from 'svelte';

	import {
		blobToFile,
		calculateSHA256,
		extractSentencesForAudio,
		findWordIndices
	} from '$lib/utils';
	import { generateEmoji } from '$lib/apis';
	import { synthesizeOpenAISpeech, transcribeAudio } from '$lib/apis/audio';
	import { toast } from 'svelte-sonner';

	import Tooltip from '$lib/components/common/Tooltip.svelte';
	import VideoInputMenu from './CallOverlay/VideoInputMenu.svelte';
	import { get } from 'svelte/store';

	const i18n = getContext('i18n');

	export let eventTarget: EventTarget;
	export let submitPrompt: Function;
	export let stopResponse: Function;
	export let files;
	export let chatId;
	export let modelId;

	let loading = false;
	let confirmed = false;
	let interrupted = false;
	let emoji = null;

	let camera = false;
	let cameraStream = null;

	let chatStreaming = false;

	let rmsLevel = 0;
	let hasStartedSpeaking = false;

	let mediaRecorder;
	let audioChunks = [];

	let videoInputDevices = [];
	let selectedVideoInputDeviceId = null;

	const getVideoInputDevices = async () => {
		const devices = await navigator.mediaDevices.enumerateDevices();
		videoInputDevices = devices.filter((device) => device.kind === 'videoinput');

		if (!!navigator.mediaDevices.getDisplayMedia) {
			videoInputDevices = [
				...videoInputDevices,
				{
					deviceId: 'screen',
					label: 'Screen Share'
				}
			];
		}

		console.log(videoInputDevices);

		if (selectedVideoInputDeviceId === null && videoInputDevices.length > 0) {
			selectedVideoInputDeviceId = videoInputDevices[0].deviceId;
		}
	};

	const startCamera = async () => {
		await getVideoInputDevices();

		if (cameraStream === null) {
			camera = true;
			await tick();
			try {
				await startVideoStream();
			} catch (err) {
				console.error('Error accessing webcam: ', err);
			}
		}
	};

	const startVideoStream = async () => {
		const video = document.getElementById('camera-feed');
		if (video) {
			if (selectedVideoInputDeviceId === 'screen') {
				cameraStream = await navigator.mediaDevices.getDisplayMedia({
					video: {
						cursor: 'always'
					},
					audio: false
				});
			} else {
				cameraStream = await navigator.mediaDevices.getUserMedia({
					video: {
						deviceId: selectedVideoInputDeviceId ? { exact: selectedVideoInputDeviceId } : undefined
					}
				});
			}

			if (cameraStream) {
				await getVideoInputDevices();
				video.srcObject = cameraStream;
				await video.play();
			}
		}
	};

	const stopVideoStream = async () => {
		if (cameraStream) {
			const tracks = cameraStream.getTracks();
			tracks.forEach((track) => track.stop());
		}

		cameraStream = null;
	};

	const takeScreenshot = () => {
		const video = document.getElementById('camera-feed');
		const canvas = document.getElementById('camera-canvas');

		if (!canvas) {
			return;
		}

		const context = canvas.getContext('2d');

		// Make the canvas match the video dimensions
		canvas.width = video.videoWidth;
		canvas.height = video.videoHeight;

		// Draw the image from the video onto the canvas
		context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);

		// Convert the canvas to a data base64 URL and console log it
		const dataURL = canvas.toDataURL('image/png');
		console.log(dataURL);

		return dataURL;
	};

	const stopCamera = async () => {
		await stopVideoStream();
		camera = false;
	};

	const MIN_DECIBELS = -55;
	const VISUALIZER_BUFFER_LENGTH = 300;
	const transcribeHandler = async (audioBlob) => {
		// Create a blob from the audio chunks

		await tick();
		const file = blobToFile(audioBlob, 'recording.wav');

		const res = await transcribeAudio(localStorage.token, file).catch((error) => {
			toast.error(error);
			return null;
		});

		if (res) {
			console.log(res.text);

			if (res.text !== '') {
				const _responses = await submitPrompt(res.text, { _raw: true });
				console.log(_responses);
			}
		}
	};

	const stopRecordingCallback = async (_continue = true) => {
		if ($showCallOverlay) {
			console.log('%c%s', 'color: red; font-size: 20px;', '🚨 stopRecordingCallback 🚨');

			// deep copy the audioChunks array
			const _audioChunks = audioChunks.slice(0);

			audioChunks = [];
			mediaRecorder = false;

			if (_continue) {
				startRecording();
			}

			if (confirmed) {
				loading = true;
				emoji = null;

				if (cameraStream) {
					const imageUrl = takeScreenshot();

					files = [
						{
							type: 'image',
							url: imageUrl
						}
					];
				}

				const audioBlob = new Blob(_audioChunks, { type: 'audio/wav' });
				await transcribeHandler(audioBlob);

				confirmed = false;
				loading = false;
			}
		} else {
			audioChunks = [];
			mediaRecorder = false;
		}
	};

	const startRecording = async () => {
		const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
		mediaRecorder = new MediaRecorder(stream);

		mediaRecorder.onstart = () => {
			console.log('Recording started');
			audioChunks = [];
			analyseAudio(stream);
		};

		mediaRecorder.ondataavailable = (event) => {
			if (hasStartedSpeaking) {
				audioChunks.push(event.data);
			}
		};

		mediaRecorder.onstop = (e) => {
			console.log('Recording stopped', e);
			stopRecordingCallback();
		};

		mediaRecorder.start();
	};

	// Function to calculate the RMS level from time domain data
	const calculateRMS = (data: Uint8Array) => {
		let sumSquares = 0;
@@ -46,15 +244,6 @@
		return Math.sqrt(sumSquares / data.length);
	};

	const normalizeRMS = (rms) => {
		rms = rms * 10;
		const exp = 1.5; // Adjust exponent value; values greater than 1 expand larger numbers more and compress smaller numbers more
		const scaledRMS = Math.pow(rms, exp);

		// Scale between 0.01 (1%) and 1.0 (100%)
		return Math.min(1.0, Math.max(0.01, scaledRMS));
	};

	const analyseAudio = (stream) => {
		const audioContext = new AudioContext();
		const audioStreamSource = audioContext.createMediaStreamSource(stream);
...
@@ -71,15 +260,14 @@
		let lastSoundTime = Date.now();
		hasStartedSpeaking = false;

		const detectSound = () => {
			const processFrame = () => {
				if (!mediaRecorder || !$showCallOverlay) {
					return;
				}

				analyser.getByteTimeDomainData(timeDomainData);
				analyser.getByteFrequencyData(domainData);
...
@@ -89,8 +277,14 @@
				// Check if initial speech/noise has started
				const hasSound = domainData.some((value) => value > 0);
				if (hasSound) {
					// BIG RED TEXT
					console.log('%c%s', 'color: red; font-size: 20px;', '🔊 Sound detected');

					if (!hasStartedSpeaking) {
						hasStartedSpeaking = true;
						stopAllAudio();
					}

					lastSoundTime = Date.now();
				}
...
@@ -100,7 +294,9 @@
						confirmed = true;

						if (mediaRecorder) {
							console.log('%c%s', 'color: red; font-size: 20px;', '🔇 Silence detected');
							mediaRecorder.stop();
							return;
						}
					}
				}
...
@@ -114,348 +310,288 @@
		detectSound();
	};
const stopAllAudio = () => { let finishedMessages = {};
if (currentUtterance) { let currentMessageId = null;
speechSynthesis.cancel(); let currentUtterance = null;
currentUtterance = null;
}
if (assistantAudio[assistantAudioIdx]) {
assistantAudio[assistantAudioIdx].pause();
assistantAudio[assistantAudioIdx].currentTime = 0;
}
const audioElement = document.getElementById('audioElement'); const speakSpeechSynthesisHandler = (content) => {
audioElement.pause(); if ($showCallOverlay) {
audioElement.currentTime = 0; return new Promise((resolve) => {
let voices = [];
const getVoicesLoop = setInterval(async () => {
voices = await speechSynthesis.getVoices();
if (voices.length > 0) {
clearInterval(getVoicesLoop);
const voice =
voices
?.filter(
(v) => v.voiceURI === ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
)
?.at(0) ?? undefined;
currentUtterance = new SpeechSynthesisUtterance(content);
if (voice) {
currentUtterance.voice = voice;
}
assistantSpeaking = false; speechSynthesis.speak(currentUtterance);
currentUtterance.onend = async (e) => {
await new Promise((r) => setTimeout(r, 200));
resolve(e);
};
}
}, 100);
});
} else {
return Promise.resolve();
}
}; };
-	const playAudio = (idx) => {
+	const playAudio = (audio) => {
		if ($showCallOverlay) {
-			return new Promise((res) => {
-				assistantAudioIdx = idx;
+			return new Promise((resolve) => {
				const audioElement = document.getElementById('audioElement');
-				const audio = assistantAudio[idx];
-				audioElement.src = audio.src; // Assume `assistantAudio` has objects with a `src` property
+
+				if (audioElement) {
+					audioElement.src = audio.src;
					audioElement.muted = true;

					audioElement
						.play()
						.then(() => {
							audioElement.muted = false;
						})
						.catch((error) => {
-							toast.error(error);
+							console.error(error);
						});

					audioElement.onended = async (e) => {
-						await new Promise((r) => setTimeout(r, 300));
-
-						if (Object.keys(assistantAudio).length - 1 === idx) {
-							assistantSpeaking = false;
-						}
-
-						res(e);
+						await new Promise((r) => setTimeout(r, 100));
+						resolve(e);
					};
+				}
			});
		} else {
			return Promise.resolve();
		}
	};
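`playAudio` now resolves its promise from the element's `onended` callback, which is what lets the queue consumer further down `await` one sentence finishing before starting the next. The pattern in isolation (`playOnce` is a hypothetical name, not part of this diff):

```typescript
// Sketch: promise-wrapped one-shot playback so callers can `await` completion.
function playOnce(el: HTMLAudioElement, src: string): Promise<void> {
	return new Promise((resolve, reject) => {
		el.src = src;
		el.onended = () => resolve();
		el.onerror = () => reject(new Error(`playback failed for ${src}`));
		el.play().catch(reject);
	});
}
```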
-	const getOpenAISpeech = async (text) => {
-		const res = await synthesizeOpenAISpeech(
-			localStorage.token,
-			$settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice,
-			text
-		).catch((error) => {
-			toast.error(error);
-			assistantSpeaking = false;
-			return null;
-		});
-
-		if (res) {
-			const blob = await res.blob();
-			const blobUrl = URL.createObjectURL(blob);
-			const audio = new Audio(blobUrl);
-			assistantAudio = audio;
-		}
-	};
-
-	const transcribeHandler = async (audioBlob) => {
-		// Create a blob from the audio chunks
-
-		await tick();
-		const file = blobToFile(audioBlob, 'recording.wav');
-
-		const res = await transcribeAudio(localStorage.token, file).catch((error) => {
-			toast.error(error);
-			return null;
-		});
-
-		if (res) {
-			console.log(res.text);
-
-			if (res.text !== '') {
-				const _responses = await submitPrompt(res.text);
-				console.log(_responses);
-
-				if (_responses.at(0)) {
-					const content = _responses[0];
-					if ((content ?? '').trim() !== '') {
-						assistantSpeakingHandler(content);
-					}
-				}
-			}
-		}
-	};
+	const stopAllAudio = async () => {
+		interrupted = true;
+
+		if (chatStreaming) {
+			stopResponse();
+		}
+
+		if (currentUtterance) {
+			speechSynthesis.cancel();
+			currentUtterance = null;
+		}
+
+		const audioElement = document.getElementById('audioElement');
+		if (audioElement) {
+			audioElement.muted = true;
+			audioElement.pause();
+			audioElement.currentTime = 0;
+		}
+	};
+
+	let audioAbortController = new AbortController();
+
+	// Audio cache map where key is the content and value is the Audio object.
+	const audioCache = new Map();
+	const emojiCache = new Map();
+
+	const fetchAudio = async (content) => {
+		if (!audioCache.has(content)) {
+			try {
+				// Set the emoji for the content if needed
+				if ($settings?.showEmojiInCall ?? false) {
+					const emoji = await generateEmoji(localStorage.token, modelId, content, chatId);
+					if (emoji) {
+						emojiCache.set(content, emoji);
+					}
+				}
+
+				if ($config.audio.tts.engine !== '') {
+					const res = await synthesizeOpenAISpeech(
+						localStorage.token,
+						$settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice,
+						content
+					).catch((error) => {
+						console.error(error);
+						return null;
+					});
+
+					if (res) {
+						const blob = await res.blob();
+						const blobUrl = URL.createObjectURL(blob);
+						audioCache.set(content, new Audio(blobUrl));
+					}
+				} else {
+					audioCache.set(content, true);
+				}
+			} catch (error) {
+				console.error('Error synthesizing speech:', error);
+			}
+		}
+
+		return audioCache.get(content);
+	};
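`fetchAudio` is effectively an async memoizer keyed on the sentence text: each sentence is synthesized at most once, and replays reuse the cached Audio object (or `true` as a marker when the browser's own speech synthesis will handle playback). The core of that pattern, distilled (`getOrSynthesize` and its `synthesize` parameter are illustrative names):

```typescript
// Distilled sketch of the content-keyed audio cache used by fetchAudio.
const cache = new Map<string, HTMLAudioElement>();

async function getOrSynthesize(
	content: string,
	synthesize: (text: string) => Promise<Blob>
): Promise<HTMLAudioElement> {
	const hit = cache.get(content);
	if (hit) {
		return hit; // already synthesized once; reuse
	}

	const blob = await synthesize(content);
	const audio = new Audio(URL.createObjectURL(blob));
	cache.set(content, audio);
	return audio;
}
```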
-	const assistantSpeakingHandler = async (content) => {
-		assistantSpeaking = true;
-
-		if (($config.audio.tts.engine ?? '') == '') {
-			let voices = [];
-			const getVoicesLoop = setInterval(async () => {
-				voices = await speechSynthesis.getVoices();
-				if (voices.length > 0) {
-					clearInterval(getVoicesLoop);
-
-					const voice =
-						voices
-							?.filter(
-								(v) => v.voiceURI === ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
-							)
-							?.at(0) ?? undefined;
-
-					currentUtterance = new SpeechSynthesisUtterance(content);
-
-					if (voice) {
-						currentUtterance.voice = voice;
-					}
-
-					speechSynthesis.speak(currentUtterance);
-				}
-			}, 100);
-		} else if ($config.audio.tts.engine === 'openai') {
-			console.log('openai');
-
-			const sentences = extractSentences(content).reduce((mergedTexts, currentText) => {
-				const lastIndex = mergedTexts.length - 1;
-				if (lastIndex >= 0) {
-					const previousText = mergedTexts[lastIndex];
-					const wordCount = previousText.split(/\s+/).length;
-					if (wordCount < 2) {
-						mergedTexts[lastIndex] = previousText + ' ' + currentText;
-					} else {
-						mergedTexts.push(currentText);
-					}
-				} else {
-					mergedTexts.push(currentText);
-				}
-				return mergedTexts;
-			}, []);
-
-			console.log(sentences);
-
-			let lastPlayedAudioPromise = Promise.resolve(); // Initialize a promise that resolves immediately
-
-			for (const [idx, sentence] of sentences.entries()) {
-				const res = await synthesizeOpenAISpeech(
-					localStorage.token,
-					$settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice,
-					sentence
-				).catch((error) => {
-					toast.error(error);
-					assistantSpeaking = false;
-					return null;
-				});
-
-				if (res) {
-					const blob = await res.blob();
-					const blobUrl = URL.createObjectURL(blob);
-					const audio = new Audio(blobUrl);
-					assistantAudio[idx] = audio;
-					lastPlayedAudioPromise = lastPlayedAudioPromise.then(() => playAudio(idx));
-				}
-			}
-		}
-	};
+	let messages = {};
+
+	const monitorAndPlayAudio = async (id, signal) => {
+		while (!signal.aborted) {
+			if (messages[id] && messages[id].length > 0) {
+				// Retrieve the next content string from the queue
+				const content = messages[id].shift(); // Dequeues the content for playing
+
+				if (audioCache.has(content)) {
+					// If content is available in the cache, play it
+
+					// Set the emoji for the content if available
+					if (($settings?.showEmojiInCall ?? false) && emojiCache.has(content)) {
+						emoji = emojiCache.get(content);
+					} else {
+						emoji = null;
+					}
+
+					if ($config.audio.tts.engine !== '') {
+						try {
+							console.log(
+								'%c%s',
+								'color: red; font-size: 20px;',
+								`Playing audio for content: ${content}`
+							);
+
+							const audio = audioCache.get(content);
+							await playAudio(audio); // Here ensure that playAudio is indeed correct method to execute
+							console.log(`Played audio for content: ${content}`);
+							await new Promise((resolve) => setTimeout(resolve, 200)); // Wait before retrying to reduce tight loop
+						} catch (error) {
+							console.error('Error playing audio:', error);
+						}
+					} else {
+						await speakSpeechSynthesisHandler(content);
+					}
+				} else {
+					// If not available in the cache, push it back to the queue and delay
+					messages[id].unshift(content); // Re-queue the content at the start
+					console.log(`Audio for "${content}" not yet available in the cache, re-queued...`);
+					await new Promise((resolve) => setTimeout(resolve, 200)); // Wait before retrying to reduce tight loop
+				}
+			} else if (finishedMessages[id] && messages[id] && messages[id].length === 0) {
+				// If the message is finished and there are no more messages to process, break the loop
+				break;
+			} else {
+				// No messages to process, sleep for a bit
+				await new Promise((resolve) => setTimeout(resolve, 200));
+			}
+		}
+		console.log(`Audio monitoring and playing stopped for message ID ${id}`);
+	};
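`monitorAndPlayAudio` is the consumer half of a producer/consumer queue: chat events enqueue sentences, this loop dequeues and plays them in order, re-queues anything whose audio is not cached yet, and exits when the message is finished and drained or when the AbortSignal fires. The control flow reduced to its essentials (names here are illustrative):

```typescript
// Distilled sketch of the abortable queue-consumer loop above.
async function consumeQueue<T>(
	queue: T[],
	play: (item: T) => Promise<void>,
	signal: AbortSignal,
	isFinished: () => boolean
): Promise<void> {
	while (!signal.aborted) {
		const item = queue.shift();
		if (item !== undefined) {
			await play(item); // one item at a time, in order
		} else if (isFinished()) {
			break; // stream ended and queue drained
		} else {
			await new Promise((r) => setTimeout(r, 200)); // idle wait, avoids a tight loop
		}
	}
}
```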
-	const stopRecordingCallback = async () => {
-		if ($showCallOverlay) {
-			if (confirmed) {
-				loading = true;
-
-				if (cameraStream) {
-					const imageUrl = takeScreenshot();
-
-					files = [
-						{
-							type: 'image',
-							url: imageUrl
-						}
-					];
-				}
-
-				const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
-				await transcribeHandler(audioBlob);
-
-				confirmed = false;
-				loading = false;
-			}
-			audioChunks = [];
-			mediaRecorder = false;
-			startRecording();
-		} else {
-			audioChunks = [];
-			mediaRecorder = false;
-		}
-	};
-
-	const startRecording = async () => {
-		const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-		mediaRecorder = new MediaRecorder(stream);
-		mediaRecorder.onstart = () => {
-			console.log('Recording started');
-			audioChunks = [];
-			analyseAudio(stream);
-		};
-		mediaRecorder.ondataavailable = (event) => {
-			if (hasStartedSpeaking) {
-				audioChunks.push(event.data);
-			}
-		};
-		mediaRecorder.onstop = async () => {
-			console.log('Recording stopped');
-			await stopRecordingCallback();
-		};
-		mediaRecorder.start();
-	};
-
-	let videoInputDevices = [];
-	let selectedVideoInputDeviceId = null;
-
-	const getVideoInputDevices = async () => {
-		const devices = await navigator.mediaDevices.enumerateDevices();
-		videoInputDevices = devices.filter((device) => device.kind === 'videoinput');
-
-		if (!!navigator.mediaDevices.getDisplayMedia) {
-			videoInputDevices = [
-				...videoInputDevices,
-				{
-					deviceId: 'screen',
-					label: 'Screen Share'
-				}
-			];
-		}
-
-		console.log(videoInputDevices);
-		if (selectedVideoInputDeviceId === null && videoInputDevices.length > 0) {
-			selectedVideoInputDeviceId = videoInputDevices[0].deviceId;
-		}
-	};
-
-	const startCamera = async () => {
-		await getVideoInputDevices();
-
-		if (cameraStream === null) {
-			camera = true;
-			await tick();
-			try {
-				await startVideoStream();
-			} catch (err) {
-				console.error('Error accessing webcam: ', err);
-			}
-		}
-	};
-
-	const startVideoStream = async () => {
-		const video = document.getElementById('camera-feed');
-		if (video) {
-			if (selectedVideoInputDeviceId === 'screen') {
-				cameraStream = await navigator.mediaDevices.getDisplayMedia({
-					video: {
-						cursor: 'always'
-					},
-					audio: false
-				});
-			} else {
-				cameraStream = await navigator.mediaDevices.getUserMedia({
-					video: {
-						deviceId: selectedVideoInputDeviceId ? { exact: selectedVideoInputDeviceId } : undefined
-					}
-				});
-			}
-
-			if (cameraStream) {
-				await getVideoInputDevices();
-				video.srcObject = cameraStream;
-				await video.play();
-			}
-		}
-	};
-
-	const stopVideoStream = async () => {
-		if (cameraStream) {
-			const tracks = cameraStream.getTracks();
-			tracks.forEach((track) => track.stop());
-		}
-
-		cameraStream = null;
-	};
-
-	const takeScreenshot = () => {
-		const video = document.getElementById('camera-feed');
-		const canvas = document.getElementById('camera-canvas');
-
-		if (!canvas) {
-			return;
-		}
-
-		const context = canvas.getContext('2d');
-
-		// Make the canvas match the video dimensions
-		canvas.width = video.videoWidth;
-		canvas.height = video.videoHeight;
-
-		// Draw the image from the video onto the canvas
-		context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
-
-		// Convert the canvas to a data base64 URL and console log it
-		const dataURL = canvas.toDataURL('image/png');
-		console.log(dataURL);
-
-		return dataURL;
-	};
-
-	const stopCamera = async () => {
-		await stopVideoStream();
-		camera = false;
-	};
-
-	$: if ($showCallOverlay) {
-		startRecording();
-	} else {
-		stopCamera();
-	}
+	onMount(async () => {
+		startRecording();
+
+		const chatStartHandler = async (e) => {
+			const { id } = e.detail;
+
+			chatStreaming = true;
+
+			if (currentMessageId !== id) {
+				console.log(`Received chat start event for message ID ${id}`);
+
+				currentMessageId = id;
+				if (audioAbortController) {
+					audioAbortController.abort();
+				}
+				audioAbortController = new AbortController();
+
+				// Start monitoring and playing audio for the message ID
+				monitorAndPlayAudio(id, audioAbortController.signal);
+			}
+		};
+
+		const chatEventHandler = async (e) => {
+			const { id, content } = e.detail;
+			// "id" here is message id
+			// if "id" is not the same as "currentMessageId" then do not process
+			// "content" here is a sentence from the assistant,
+			// there will be many sentences for the same "id"
+
+			if (currentMessageId === id) {
+				console.log(`Received chat event for message ID ${id}: ${content}`);
+
+				try {
+					if (messages[id] === undefined) {
+						messages[id] = [content];
+					} else {
+						messages[id].push(content);
+					}
+
+					console.log(content);
+
+					fetchAudio(content);
+				} catch (error) {
+					console.error('Failed to fetch or play audio:', error);
+				}
+			}
+		};
+
+		const chatFinishHandler = async (e) => {
+			const { id, content } = e.detail;
+			// "content" here is the entire message from the assistant
+
+			chatStreaming = false;
+			finishedMessages[id] = true;
+		};
+
+		eventTarget.addEventListener('chat:start', chatStartHandler);
+		eventTarget.addEventListener('chat', chatEventHandler);
+		eventTarget.addEventListener('chat:finish', chatFinishHandler);
+
+		return async () => {
+			eventTarget.removeEventListener('chat:start', chatStartHandler);
+			eventTarget.removeEventListener('chat', chatEventHandler);
+			eventTarget.removeEventListener('chat:finish', chatFinishHandler);
+
+			audioAbortController.abort();
+			await tick();
+
+			await stopAllAudio();
+
+			await stopRecordingCallback(false);
+			await stopCamera();
+		};
+	});
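These listeners assume a producer that dispatches `chat:start`, `chat`, and `chat:finish` CustomEvents on the shared `eventTarget`, with the detail shapes the handlers destructure. A hedged sketch of that producer side (the `emit*` helpers are illustrative, not part of this diff):

```typescript
// Sketch of the event producer the handlers above expect.
const target = new EventTarget();

function emitStart(id: string) {
	target.dispatchEvent(new CustomEvent('chat:start', { detail: { id } }));
}

function emitSentence(id: string, content: string) {
	// one 'chat' event per streamed sentence; many per message id
	target.dispatchEvent(new CustomEvent('chat', { detail: { id, content } }));
}

function emitFinish(id: string, content: string) {
	// content carries the entire assistant message
	target.dispatchEvent(new CustomEvent('chat:finish', { detail: { id, content } }));
}
```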
</script>

{#if $showCallOverlay}
+	<audio id="audioElement" src="" style="display: none;" />
	<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
		<div
			class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
		>
			<div class="max-w-lg w-full h-screen max-h-[100dvh] flex flex-col justify-between p-3 md:p-6">
				{#if camera}
-					<div class="flex justify-center items-center w-full min-h-20">
+					<div class="flex justify-center items-center w-full h-20 min-h-20">
-						{#if loading}
+						{#if emoji}
+							<div
+								class=" transition-all rounded-full"
+								style="font-size:{rmsLevel * 100 > 4
+									? '4.5'
+									: rmsLevel * 100 > 2
+									? '4.25'
+									: rmsLevel * 100 > 1
+									? '3.75'
+									: '3.5'}rem;width: 100%; text-align:center;"
+							>
+								{emoji}
+							</div>
+						{:else if loading}
							<svg
								class="size-12 text-gray-900 dark:text-gray-400"
								viewBox="0 0 24 24"
@@ -509,7 +645,20 @@
<div class="flex justify-center items-center flex-1 h-full w-full max-h-full"> <div class="flex justify-center items-center flex-1 h-full w-full max-h-full">
{#if !camera} {#if !camera}
{#if loading} {#if emoji}
<div
class=" transition-all rounded-full"
style="font-size:{rmsLevel * 100 > 4
? '13'
: rmsLevel * 100 > 2
? '12'
: rmsLevel * 100 > 1
? '11.5'
: '11'}rem;width:100%;text-align:center;"
>
{emoji}
</div>
{:else if loading}
<svg <svg
class="size-44 text-gray-900 dark:text-gray-400" class="size-44 text-gray-900 dark:text-gray-400"
viewBox="0 0 24 24" viewBox="0 0 24 24"
......
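The inline styles above scale the emoji with input loudness in discrete steps (4.5/4.25/3.75/3.5 rem in the camera view, 13/12/11.5/11 rem full-screen). The same mapping as a helper, for readability (`emojiSizeRem` is a hypothetical name; the template keeps the ternaries inline):

```typescript
// Sketch of the rmsLevel-to-font-size mapping inlined in the template (camera view values).
function emojiSizeRem(rmsLevel: number): string {
	const pct = rmsLevel * 100;
	if (pct > 4) return '4.5';
	if (pct > 2) return '4.25';
	if (pct > 1) return '3.75';
	return '3.5';
}
```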
@@ -79,7 +79,7 @@
		history.currentId = userMessageId;

		await tick();
-		await sendPrompt(userPrompt, userMessageId, undefined, false);
+		await sendPrompt(userPrompt, userMessageId);
	};

	const updateChatMessages = async () => {
......
<script lang="ts"> <script lang="ts">
import { WEBUI_BASE_URL } from '$lib/constants'; import { WEBUI_BASE_URL } from '$lib/constants';
import { marked } from 'marked';
import { config, user, models as _models } from '$lib/stores'; import { config, user, models as _models } from '$lib/stores';
import { onMount, getContext } from 'svelte'; import { onMount, getContext } from 'svelte';
import { blur, fade } from 'svelte/transition'; import { blur, fade } from 'svelte/transition';
import Suggestions from '../MessageInput/Suggestions.svelte'; import Suggestions from '../MessageInput/Suggestions.svelte';
import { sanitizeResponseContent } from '$lib/utils';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
...@@ -65,8 +68,12 @@ ...@@ -65,8 +68,12 @@
<div in:fade={{ duration: 200, delay: 200 }}> <div in:fade={{ duration: 200, delay: 200 }}>
{#if models[selectedModelIdx]?.info?.meta?.description ?? null} {#if models[selectedModelIdx]?.info?.meta?.description ?? null}
<div class="mt-0.5 text-base font-normal text-gray-500 dark:text-gray-400 line-clamp-3"> <div
{models[selectedModelIdx]?.info?.meta?.description} class="mt-0.5 text-base font-normal text-gray-500 dark:text-gray-400 line-clamp-3 markdown"
>
{@html marked.parse(
sanitizeResponseContent(models[selectedModelIdx]?.info?.meta?.description)
)}
</div> </div>
{#if models[selectedModelIdx]?.info?.meta?.user} {#if models[selectedModelIdx]?.info?.meta?.user}
<div class="mt-0.5 text-sm font-normal text-gray-400 dark:text-gray-500"> <div class="mt-0.5 text-sm font-normal text-gray-400 dark:text-gray-500">
......
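The Placeholder change renders the model description as markdown, but only after sanitizing it, since `{@html}` bypasses Svelte's escaping. The flow in isolation (`renderDescription` is an illustrative wrapper; `sanitizeResponseContent`'s exact behavior lives in `$lib/utils`):

```typescript
import { marked } from 'marked';

// Sketch: sanitize first so raw HTML can't be injected, then convert markdown.
function renderDescription(description: string, sanitize: (s: string) => string): string {
	return marked.parse(sanitize(description)) as string;
}
```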