Unverified commit ac34a797, authored by Timothy Jaeryang Baek, committed by GitHub

Merge branch 'main' into dev

parents bfa64717 c6b9d4cd
@@ -13,7 +13,7 @@

ChatGPT-Style Web Interface for Ollama 🦙

**Disclaimer:** _ollama-webui is a community-driven project and is not affiliated with the Ollama team in any way. This initiative is independent, and any inquiries or feedback should be directed to [our community on Discord](https://discord.gg/5rJgQTnV4s). We kindly request users to refrain from contacting or harassing the Ollama team regarding this project._

![Ollama Web UI Demo](./demo.gif)

@@ -35,6 +35,8 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c

- 📥🗑️ **Download/Delete Models**: Easily download or remove models directly from the web UI.
- ⬆️ **GGUF File Model Creation**: Effortlessly create Ollama models by uploading GGUF files directly from the web UI. Streamlined process with options to upload from your machine or download GGUF files from Hugging Face.
- 🤖 **Multiple Model Support**: Seamlessly switch between different chat models for diverse interactions.
- 🔄 **Multi-Modal Support**: Seamlessly engage with models that support multimodal interactions, including images (e.g., LLava).

...
__pycache__
.env
_old
uploads

...
from fastapi import FastAPI, Request, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware

from apps.web.routers import auths, users, utils
from config import WEBUI_VERSION, WEBUI_AUTH

app = FastAPI()

@@ -19,6 +19,7 @@ app.add_middleware(

app.include_router(auths.router, prefix="/auths", tags=["auths"])
app.include_router(users.router, prefix="/users", tags=["users"])
app.include_router(utils.router, prefix="/utils", tags=["utils"])


@app.get("/")

...
New file: apps/web/routers/utils.py

from fastapi import APIRouter, UploadFile, File, BackgroundTasks
from fastapi import Depends, HTTPException, status
from starlette.responses import StreamingResponse
from pydantic import BaseModel

from urllib.parse import urlparse
import requests
import os
import aiohttp
import json

from utils.misc import calculate_sha256
from config import OLLAMA_API_BASE_URL

router = APIRouter()


class UploadBlobForm(BaseModel):
    filename: str

def parse_huggingface_url(hf_url):
    try:
        # Parse the URL
        parsed_url = urlparse(hf_url)

        # Get the path and split it into components
        path_components = parsed_url.path.split("/")

        # Extract the desired output
        user_repo = "/".join(path_components[1:3])
        model_file = path_components[-1]

        return model_file
    except ValueError:
        return None
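
# Example (illustrative, not part of the commit): for
# "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
# the function returns the trailing path component, "stablelm-zephyr-3b.Q2_K.gguf".
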
async def download_file_stream(url, file_path, file_name, chunk_size=1024 * 1024):
    done = False

    if os.path.exists(file_path):
        current_size = os.path.getsize(file_path)
    else:
        current_size = 0

    # Resume a partial download by requesting only the remaining bytes
    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}

    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout

    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url, headers=headers) as response:
            total_size = int(response.headers.get("content-length", 0)) + current_size

            with open(file_path, "ab+") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    current_size += len(data)
                    file.write(data)

                    done = current_size == total_size
                    progress = round((current_size / total_size) * 100, 2)
                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'

                if done:
                    file.seek(0)
                    hashed = calculate_sha256(file)
                    file.seek(0)

                    # Register the downloaded file as a blob with Ollama
                    url = f"{OLLAMA_API_BASE_URL}/blobs/sha256:{hashed}"
                    response = requests.post(url, data=file)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file_name,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception("Ollama: Could not create blob, Please try again.")
@router.get("/download")
async def download(
url: str,
):
# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
file_name = parse_huggingface_url(url)
if file_name:
os.makedirs("./uploads", exist_ok=True)
file_path = os.path.join("./uploads", f"{file_name}")
return StreamingResponse(
download_file_stream(url, file_path, file_name),
media_type="text/event-stream",
)
else:
return None
@router.post("/upload")
async def upload(file: UploadFile = File(...)):
os.makedirs("./uploads", exist_ok=True)
file_path = os.path.join("./uploads", file.filename)
async def file_write_stream():
total = 0
total_size = file.size
chunk_size = 1024 * 1024
done = False
try:
with open(file_path, "wb+") as f:
while True:
chunk = file.file.read(chunk_size)
if not chunk:
break
f.write(chunk)
total += len(chunk)
done = total_size == total
progress = round((total / total_size) * 100, 2)
res = {
"progress": progress,
"total": total_size,
"completed": total,
}
yield f"data: {json.dumps(res)}\n\n"
if done:
f.seek(0)
hashed = calculate_sha256(f)
f.seek(0)
url = f"{OLLAMA_API_BASE_URL}/blobs/sha256:{hashed}"
response = requests.post(url, data=f)
if response.ok:
res = {
"done": done,
"blob": f"sha256:{hashed}",
"name": file.filename,
}
os.remove(file_path)
yield f"data: {json.dumps(res)}\n\n"
else:
raise "Ollama: Could not create blob, Please try again."
except Exception as e:
res = {"error": str(e)}
yield f"data: {json.dumps(res)}\n\n"
return StreamingResponse(file_write_stream(), media_type="text/event-stream")
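
Both endpoints stream their progress as Server-Sent Events. A minimal client sketch (an illustration, not part of this commit) for the download route, assuming the backend runs at http://localhost:8080 with the router mounted under /api/v1/utils as shown in main.py above:

```python
import json
import requests

BASE_URL = "http://localhost:8080/api/v1/utils"  # assumed local deployment
HF_URL = (
    "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF"
    "/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
)

# The endpoint replies with "data: {...}\n\n" progress events, ending with a
# final event carrying the blob digest and file name.
with requests.get(f"{BASE_URL}/download", params={"url": HF_URL}, stream=True) as r:
    for line in r.iter_lines():
        if line.startswith(b"data: "):
            event = json.loads(line[len(b"data: "):])
            print(event)
```
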
@@ -30,7 +30,7 @@ if ENV == "prod":

####################################
# WEBUI_VERSION
####################################

WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.40")

####################################
# WEBUI_AUTH
####################################

...
@@ -12,6 +12,7 @@ passlib[bcrypt]

uuid
requests
aiohttp
pymongo
bcrypt

...
@@ -13,3 +13,11 @@ def get_gravatar_url(email):

    # Grab the actual image URL
    return f"https://www.gravatar.com/avatar/{hash_hex}?d=mp"


def calculate_sha256(file):
    sha256 = hashlib.sha256()
    # Read the file in chunks to efficiently handle large files
    for chunk in iter(lambda: file.read(8192), b""):
        sha256.update(chunk)
    return sha256.hexdigest()
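
A quick usage sketch (illustrative, not part of the commit): the helper accepts any binary file object opened for reading, which is why the routers above can hash an upload in place after seeking back to the start. Chunked reading matches hashing the whole payload at once:

```python
import hashlib
import io

# Assumes calculate_sha256 from utils/misc.py above. Reading in 8 KiB chunks
# means large GGUF files never have to be loaded into memory at once.
data = b"hello world"
digest = calculate_sha256(io.BytesIO(data))
assert digest == hashlib.sha256(data).hexdigest()
```
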
@@ -155,7 +155,7 @@

  <div class="fixed bottom-0 w-full">
    <div class="px-2.5 pt-2.5 -mb-0.5 mx-auto inset-x-0 bg-transparent flex justify-center">
      {#if messages.length == 0 && suggestionPrompts.length !== 0}
        <div class="max-w-3xl w-full">
          <Suggestions {suggestionPrompts} {submitPrompt} />
        </div>
      {/if}

...
@@ -3,7 +3,7 @@

  export let suggestionPrompts = [];
</script>

<div class=" flex flex-wrap-reverse mb-3 md:p-1 text-left w-full">
  {#each suggestionPrompts as prompt, promptIdx}
    <div class="{promptIdx > 1 ? 'hidden sm:inline-flex' : ''} basis-full sm:basis-1/2 p-[5px]">
      <button

...
@@ -8,7 +8,8 @@ export const OLLAMA_API_BASE_URL =

      : `http://localhost:11434/api`
    : PUBLIC_API_BASE_URL;

export const WEBUI_BASE_URL = dev ? `http://${location.hostname}:8080` : ``;
export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;

export const WEB_UI_VERSION = 'v1.0.0-alpha-static';

...
@@ -55,7 +55,9 @@

    // If OpenAI API Key exists
    if ($settings.OPENAI_API_KEY) {
      // Validate OPENAI_API_KEY
      const API_BASE_URL = $settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1';

      const openaiModelRes = await fetch(`${API_BASE_URL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',

@@ -72,15 +74,19 @@

          return null;
        });

      const openAIModels = Array.isArray(openaiModelRes)
        ? openaiModelRes
        : openaiModelRes?.data ?? null;

      models.push(
        ...(openAIModels
          ? [
              { name: 'hr' },
              ...openAIModels
                .map((model) => ({ name: model.id, external: true }))
                .filter((model) =>
                  API_BASE_URL.includes('openai') ? model.name.includes('gpt') : true
                )
            ]
          : [])
      );

...
@@ -7,7 +7,7 @@

  import { splitStream } from '$lib/utils';
  import { goto } from '$app/navigation';
  import { config, models, modelfiles, user, settings, db, chats, chatId } from '$lib/stores';

  import MessageInput from '$lib/components/chat/MessageInput.svelte';
  import Messages from '$lib/components/chat/Messages.svelte';

@@ -130,7 +130,8 @@

  const sendPrompt = async (userPrompt, parentId, _chatId) => {
    await Promise.all(
      selectedModels.map(async (model) => {
        console.log(model);
        if ($models.filter((m) => m.name === model)[0].external) {
          await sendPromptOpenAI(model, userPrompt, parentId, _chatId);
        } else {
          await sendPromptOllama(model, userPrompt, parentId, _chatId);

@@ -244,6 +245,13 @@

          }
        } else {
          responseMessage.done = true;

          if (responseMessage.content == '') {
            responseMessage.error = true;
            responseMessage.content =
              'Oops! No text generated from Ollama, Please try again.';
          }

          responseMessage.context = data.context ?? null;
          responseMessage.info = {
            total_duration: data.total_duration,

@@ -364,132 +372,162 @@

      ];
    }

    await tick();
    window.scrollTo({ top: document.body.scrollHeight });

    const res = await fetch(
      `${$settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1'}/chat/completions`,
      {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${$settings.OPENAI_API_KEY}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({
          model: model,
          stream: true,
          messages: [
            $settings.system
              ? {
                  role: 'system',
                  content: $settings.system
                }
              : undefined,
            ...messages
          ]
            .filter((message) => message)
            .map((message) => ({
              role: message.role,
              ...(message.files
                ? {
                    content: [
                      {
                        type: 'text',
                        text: message.content
                      },
                      ...message.files
                        .filter((file) => file.type === 'image')
                        .map((file) => ({
                          type: 'image_url',
                          image_url: {
                            url: file.url
                          }
                        }))
                    ]
                  }
                : { content: message.content })
            })),
          temperature: $settings.temperature ?? undefined,
          top_p: $settings.top_p ?? undefined,
          num_ctx: $settings.num_ctx ?? undefined,
          frequency_penalty: $settings.repeat_penalty ?? undefined
        })
      }
    ).catch((err) => {
      console.log(err);
      return null;
    });

    if (res && res.ok) {
      const reader = res.body
        .pipeThrough(new TextDecoderStream())
        .pipeThrough(splitStream('\n'))
        .getReader();

      while (true) {
        const { value, done } = await reader.read();
        if (done || stopResponseFlag || _chatId !== $chatId) {
          responseMessage.done = true;
          messages = messages;
          break;
        }

        try {
          let lines = value.split('\n');

          for (const line of lines) {
            if (line !== '') {
              console.log(line);
              if (line === 'data: [DONE]') {
                responseMessage.done = true;
                messages = messages;
              } else {
                let data = JSON.parse(line.replace(/^data: /, ''));
                console.log(data);

                if (responseMessage.content == '' && data.choices[0].delta.content == '\n') {
                  continue;
                } else {
                  responseMessage.content += data.choices[0].delta.content ?? '';
                  messages = messages;
                }
              }
            }
          }
        } catch (error) {
          console.log(error);
        }

        if ($settings.notificationEnabled && !document.hasFocus()) {
          const notification = new Notification(`OpenAI ${model}`, {
            body: responseMessage.content,
            icon: '/favicon.png'
          });
        }

        if ($settings.responseAutoCopy) {
          copyToClipboard(responseMessage.content);
        }

        if (autoScroll) {
          window.scrollTo({ top: document.body.scrollHeight });
        }

        await $db.updateChatById(_chatId, {
          title: title === '' ? 'New Chat' : title,
          models: selectedModels,
          system: $settings.system ?? undefined,
          options: {
            seed: $settings.seed ?? undefined,
            temperature: $settings.temperature ?? undefined,
            repeat_penalty: $settings.repeat_penalty ?? undefined,
            top_k: $settings.top_k ?? undefined,
            top_p: $settings.top_p ?? undefined,
            num_ctx: $settings.num_ctx ?? undefined,
            ...($settings.options ?? {})
          },
          messages: messages,
          history: history
        });
      }
    } else {
      if (res !== null) {
        const error = await res.json();
        console.log(error);
        if ('detail' in error) {
          toast.error(error.detail);
          responseMessage.content = error.detail;
        } else {
          if ('message' in error.error) {
            toast.error(error.error.message);
            responseMessage.content = error.error.message;
          } else {
            toast.error(error.error);
            responseMessage.content = error.error;
          }
        }
      } else {
        toast.error(`Uh-oh! There was an issue connecting to ${model}.`);
        responseMessage.content = `Uh-oh! There was an issue connecting to ${model}.`;
      }

      responseMessage.error = true;
      responseMessage.content = `Uh-oh! There was an issue connecting to ${model}.`;
      responseMessage.done = true;
      messages = messages;
    }

    stopResponseFlag = false;
    await tick();

    if (autoScroll) {
      window.scrollTo({ top: document.body.scrollHeight });

...
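
The streaming handler above consumes the OpenAI-compatible `/chat/completions` Server-Sent Events wire format: `data: {...}` lines carrying `choices[0].delta` fragments, terminated by a `data: [DONE]` sentinel. A minimal Python sketch of the same protocol (an illustration, not code from this commit), assuming a key in the OPENAI_API_KEY environment variable:

```python
import json
import os
import requests

# Request a streamed chat completion and print deltas as they arrive.
resp = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "Content-Type": "application/json",
    },
    json={
        "model": "gpt-3.5-turbo",
        "stream": True,
        "messages": [{"role": "user", "content": "Say hello."}],
    },
    stream=True,
)

for line in resp.iter_lines():
    if not line.startswith(b"data: "):
        continue
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":  # end-of-stream sentinel
        break
    delta = json.loads(payload)["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```
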
@@ -6,7 +6,7 @@

  import { onMount, tick } from 'svelte';
  import { convertMessagesToHistory, splitStream } from '$lib/utils';
  import { goto } from '$app/navigation';
  import { config, models, modelfiles, user, settings, db, chats, chatId } from '$lib/stores';

  import MessageInput from '$lib/components/chat/MessageInput.svelte';
  import Messages from '$lib/components/chat/Messages.svelte';

@@ -144,7 +144,8 @@

  const sendPrompt = async (userPrompt, parentId, _chatId) => {
    await Promise.all(
      selectedModels.map(async (model) => {
        console.log(model);
        if ($models.filter((m) => m.name === model)[0].external) {
          await sendPromptOpenAI(model, userPrompt, parentId, _chatId);
        } else {
          await sendPromptOllama(model, userPrompt, parentId, _chatId);

@@ -258,6 +259,13 @@

          }
        } else {
          responseMessage.done = true;

          if (responseMessage.content == '') {
            responseMessage.error = true;
            responseMessage.content =
              'Oops! No text generated from Ollama, Please try again.';
          }

          responseMessage.context = data.context ?? null;
          responseMessage.info = {
            total_duration: data.total_duration,

@@ -378,132 +386,162 @@

      ];
    }

    await tick();
    window.scrollTo({ top: document.body.scrollHeight });

    const res = await fetch(
      `${$settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1'}/chat/completions`,
      {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${$settings.OPENAI_API_KEY}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({
          model: model,
          stream: true,
          messages: [
            $settings.system
              ? {
                  role: 'system',
                  content: $settings.system
                }
              : undefined,
            ...messages
          ]
            .filter((message) => message)
            .map((message) => ({
              role: message.role,
              ...(message.files
                ? {
                    content: [
                      {
                        type: 'text',
                        text: message.content
                      },
                      ...message.files
                        .filter((file) => file.type === 'image')
                        .map((file) => ({
                          type: 'image_url',
                          image_url: {
                            url: file.url
                          }
                        }))
                    ]
                  }
                : { content: message.content })
            })),
          temperature: $settings.temperature ?? undefined,
          top_p: $settings.top_p ?? undefined,
          num_ctx: $settings.num_ctx ?? undefined,
          frequency_penalty: $settings.repeat_penalty ?? undefined
        })
      }
    ).catch((err) => {
      console.log(err);
      return null;
    });

    if (res && res.ok) {
      const reader = res.body
        .pipeThrough(new TextDecoderStream())
        .pipeThrough(splitStream('\n'))
        .getReader();

      while (true) {
        const { value, done } = await reader.read();
        if (done || stopResponseFlag || _chatId !== $chatId) {
          responseMessage.done = true;
          messages = messages;
          break;
        }

        try {
          let lines = value.split('\n');

          for (const line of lines) {
            if (line !== '') {
              console.log(line);
              if (line === 'data: [DONE]') {
                responseMessage.done = true;
                messages = messages;
              } else {
                let data = JSON.parse(line.replace(/^data: /, ''));
                console.log(data);

                if (responseMessage.content == '' && data.choices[0].delta.content == '\n') {
                  continue;
                } else {
                  responseMessage.content += data.choices[0].delta.content ?? '';
                  messages = messages;
                }
              }
            }
          }
        } catch (error) {
          console.log(error);
        }

        if ($settings.notificationEnabled && !document.hasFocus()) {
          const notification = new Notification(`OpenAI ${model}`, {
            body: responseMessage.content,
            icon: '/favicon.png'
          });
        }

        if ($settings.responseAutoCopy) {
          copyToClipboard(responseMessage.content);
        }

        if (autoScroll) {
          window.scrollTo({ top: document.body.scrollHeight });
        }

        await $db.updateChatById(_chatId, {
          title: title === '' ? 'New Chat' : title,
          models: selectedModels,
          system: $settings.system ?? undefined,
          options: {
            seed: $settings.seed ?? undefined,
            temperature: $settings.temperature ?? undefined,
            repeat_penalty: $settings.repeat_penalty ?? undefined,
            top_k: $settings.top_k ?? undefined,
            top_p: $settings.top_p ?? undefined,
            num_ctx: $settings.num_ctx ?? undefined,
            ...($settings.options ?? {})
          },
          messages: messages,
          history: history
        });
      }
    } else {
      if (res !== null) {
        const error = await res.json();
        console.log(error);
        if ('detail' in error) {
          toast.error(error.detail);
          responseMessage.content = error.detail;
        } else {
          if ('message' in error.error) {
            toast.error(error.error.message);
            responseMessage.content = error.error.message;
          } else {
            toast.error(error.error);
            responseMessage.content = error.error;
          }
        }
      } else {
        toast.error(`Uh-oh! There was an issue connecting to ${model}.`);
        responseMessage.content = `Uh-oh! There was an issue connecting to ${model}.`;
      }

      responseMessage.error = true;
      responseMessage.content = `Uh-oh! There was an issue connecting to ${model}.`;
      responseMessage.done = true;
      messages = messages;
    }

    stopResponseFlag = false;
    await tick();

    if (autoScroll) {
      window.scrollTo({ top: document.body.scrollHeight });

...