Unverified commit a0935dec authored by Timothy Jaeryang Baek, committed by GitHub

Merge branch 'dev' into feature/support_auth_by_api_key

parents b4b56f9c 587a8c59
@@ -19,7 +19,7 @@
 } from '$lib/stores';
 import { copyToClipboard, splitStream } from '$lib/utils';
-import { generateChatCompletion, cancelOllamaRequest, generateTitle } from '$lib/apis/ollama';
+import { generateChatCompletion, cancelOllamaRequest } from '$lib/apis/ollama';
 import {
 addTagById,
 createNewChat,
@@ -30,14 +30,14 @@
 updateChatById
 } from '$lib/apis/chats';
 import { queryCollection, queryDoc } from '$lib/apis/rag';
-import { generateOpenAIChatCompletion } from '$lib/apis/openai';
+import { generateOpenAIChatCompletion, generateTitle } from '$lib/apis/openai';
 import MessageInput from '$lib/components/chat/MessageInput.svelte';
 import Messages from '$lib/components/chat/Messages.svelte';
 import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
 import Navbar from '$lib/components/layout/Navbar.svelte';
 import { RAGTemplate } from '$lib/utils/rag';
-import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
+import { LITELLM_API_BASE_URL, OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
 import { WEBUI_BASE_URL } from '$lib/constants';
 const i18n = getContext('i18n');
@@ -48,6 +48,7 @@
 let messagesContainerElement: HTMLDivElement;
 let currentRequestId = null;
+let showModelSelector = false;
 let selectedModels = [''];
 let selectedModelfile = null;
@@ -511,12 +512,17 @@
 if (messages.length == 2 && messages.at(1).content !== '') {
 window.history.replaceState(history.state, '', `/c/${_chatId}`);
-await generateChatTitle(_chatId, userPrompt);
+const _title = await generateChatTitle(userPrompt);
+await setChatTitle(_chatId, _title);
 }
 };
 const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 const responseMessage = history.messages[responseMessageId];
+// Wait until history/message have been updated
+await tick();
+scrollToBottom();
 const docs = messages
@@ -528,6 +534,8 @@
 console.log(docs);
+console.log(model);
 const res = await generateOpenAIChatCompletion(
 localStorage.token,
 {
@@ -580,7 +588,9 @@
 max_tokens: $settings?.options?.num_predict ?? undefined,
 docs: docs.length > 0 ? docs : undefined
 },
-model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+model?.source?.toLowerCase() === 'litellm'
+? `${LITELLM_API_BASE_URL}/v1`
+: `${OPENAI_API_BASE_URL}`
 );
 if (res && res.ok) {
@@ -696,11 +706,8 @@
 if (messages.length == 2) {
 window.history.replaceState(history.state, '', `/c/${_chatId}`);
-if ($settings?.titleAutoGenerateModel) {
-await generateChatTitle(_chatId, userPrompt);
-} else {
-await setChatTitle(_chatId, userPrompt);
-}
+const _title = await generateChatTitle(userPrompt);
+await setChatTitle(_chatId, _title);
 }
 };
@@ -754,23 +761,46 @@
 }
 };
-const generateChatTitle = async (_chatId, userPrompt) => {
-if ($settings.titleAutoGenerate ?? true) {
+const generateChatTitle = async (userPrompt) => {
+if ($settings?.title?.auto ?? true) {
+const model = $models.find((model) => model.id === selectedModels[0]);
+const titleModelId =
+model?.external ?? false
+? $settings?.title?.modelExternal ?? selectedModels[0]
+: $settings?.title?.model ?? selectedModels[0];
+const titleModel = $models.find((model) => model.id === titleModelId);
+console.log(titleModel);
 const title = await generateTitle(
 localStorage.token,
-$settings?.titleGenerationPrompt ??
+$settings?.title?.prompt ??
 $i18n.t(
 "Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
 ) + ' {{prompt}}',
-$settings?.titleAutoGenerateModel ?? selectedModels[0],
-userPrompt
+titleModelId,
+userPrompt,
+titleModel?.external ?? false
+? titleModel?.source?.toLowerCase() === 'litellm'
+? `${LITELLM_API_BASE_URL}/v1`
+: `${OPENAI_API_BASE_URL}`
+: `${OLLAMA_API_BASE_URL}/v1`
 );
-if (title) {
-await setChatTitle(_chatId, title);
-}
+return title;
 } else {
-await setChatTitle(_chatId, `${userPrompt}`);
+return `${userPrompt}`;
 }
 };
+const setChatTitle = async (_chatId, _title) => {
+if (_chatId === $chatId) {
+title = _title;
+}
+if ($settings.saveChatHistory ?? true) {
+chat = await updateChatById(localStorage.token, _chatId, { title: _title });
+await chats.set(await getChatList(localStorage.token));
+}
+};
@@ -801,17 +831,6 @@
 _tags.set(await getAllChatTags(localStorage.token));
 };
-const setChatTitle = async (_chatId, _title) => {
-if (_chatId === $chatId) {
-title = _title;
-}
-if ($settings.saveChatHistory ?? true) {
-chat = await updateChatById(localStorage.token, _chatId, { title: _title });
-await chats.set(await getChatList(localStorage.token));
-}
-};
 </script>
 <svelte:head>
@@ -823,7 +842,16 @@
 </svelte:head>
 <div class="h-screen max-h-[100dvh] w-full flex flex-col">
-<Navbar {title} shareEnabled={messages.length > 0} {initNewChat} {tags} {addTag} {deleteTag} />
+<Navbar
+{title}
+bind:selectedModels
+bind:showModelSelector
+shareEnabled={messages.length > 0}
+{initNewChat}
+{tags}
+{addTag}
+{deleteTag}
+/>
 <div class="flex flex-col flex-auto">
 <div
 class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0"
@@ -835,15 +863,7 @@
 messagesContainerElement.clientHeight + 5;
 }}
 >
-<div
-class="{$settings?.fullScreenMode ?? null
-? 'max-w-full'
-: 'max-w-2xl md:px-0'} mx-auto w-full px-4"
->
-<ModelSelector bind:selectedModels />
-</div>
-<div class=" h-full w-full flex flex-col py-8">
+<div class=" h-full w-full flex flex-col pt-2 pb-4">
 <Messages
 chatId={$chatId}
 {selectedModels}
......
@@ -19,7 +19,7 @@
 } from '$lib/stores';
 import { copyToClipboard, splitStream, convertMessagesToHistory } from '$lib/utils';
-import { generateChatCompletion, generateTitle, cancelOllamaRequest } from '$lib/apis/ollama';
+import { generateChatCompletion, cancelOllamaRequest } from '$lib/apis/ollama';
 import {
 addTagById,
 createNewChat,
@@ -31,14 +31,19 @@
 updateChatById
 } from '$lib/apis/chats';
 import { queryCollection, queryDoc } from '$lib/apis/rag';
-import { generateOpenAIChatCompletion } from '$lib/apis/openai';
+import { generateOpenAIChatCompletion, generateTitle } from '$lib/apis/openai';
 import MessageInput from '$lib/components/chat/MessageInput.svelte';
 import Messages from '$lib/components/chat/Messages.svelte';
 import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
 import Navbar from '$lib/components/layout/Navbar.svelte';
 import { RAGTemplate } from '$lib/utils/rag';
-import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
+import {
+LITELLM_API_BASE_URL,
+OPENAI_API_BASE_URL,
+OLLAMA_API_BASE_URL,
+WEBUI_BASE_URL
+} from '$lib/constants';
 const i18n = getContext('i18n');
@@ -51,8 +56,10 @@
 let currentRequestId = null;
 // let chatId = $page.params.id;
+let showModelSelector = false;
 let selectedModels = [''];
 let selectedModelfile = null;
 $: selectedModelfile =
 selectedModels.length === 1 &&
 $modelfiles.filter((modelfile) => modelfile.tagName === selectedModels[0]).length > 0
@@ -521,12 +528,17 @@
 if (messages.length == 2 && messages.at(1).content !== '') {
 window.history.replaceState(history.state, '', `/c/${_chatId}`);
-await generateChatTitle(_chatId, userPrompt);
+const _title = await generateChatTitle(userPrompt);
+await setChatTitle(_chatId, _title);
 }
 };
 const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 const responseMessage = history.messages[responseMessageId];
+// Wait until history/message have been updated
+await tick();
+scrollToBottom();
 const docs = messages
@@ -590,7 +602,9 @@
 max_tokens: $settings?.options?.num_predict ?? undefined,
 docs: docs.length > 0 ? docs : undefined
 },
-model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+model?.source?.toLowerCase() === 'litellm'
+? `${LITELLM_API_BASE_URL}/v1`
+: `${OPENAI_API_BASE_URL}`
 );
 if (res && res.ok) {
@@ -706,11 +720,8 @@
 if (messages.length == 2) {
 window.history.replaceState(history.state, '', `/c/${_chatId}`);
-if ($settings?.titleAutoGenerateModel) {
-await generateChatTitle(_chatId, userPrompt);
-} else {
-await setChatTitle(_chatId, userPrompt);
-}
+const _title = await generateChatTitle(userPrompt);
+await setChatTitle(_chatId, _title);
 }
 };
@@ -719,6 +730,19 @@
 console.log('stopResponse');
 };
+const regenerateResponse = async () => {
+console.log('regenerateResponse');
+if (messages.length != 0 && messages.at(-1).done == true) {
+messages.splice(messages.length - 1, 1);
+messages = messages;
+let userMessage = messages.at(-1);
+let userPrompt = userMessage.content;
+await sendPrompt(userPrompt, userMessage.id);
+}
+};
 const continueGeneration = async () => {
 console.log('continueGeneration');
 const _chatId = JSON.parse(JSON.stringify($chatId));
@@ -751,36 +775,35 @@
 }
 };
-const regenerateResponse = async () => {
-console.log('regenerateResponse');
-if (messages.length != 0 && messages.at(-1).done == true) {
-messages.splice(messages.length - 1, 1);
-messages = messages;
-let userMessage = messages.at(-1);
-let userPrompt = userMessage.content;
+const generateChatTitle = async (userPrompt) => {
+if ($settings?.title?.auto ?? true) {
+const model = $models.find((model) => model.id === selectedModels[0]);
-await sendPrompt(userPrompt, userMessage.id);
-}
-};
+const titleModelId =
+model?.external ?? false
+? $settings?.title?.modelExternal ?? selectedModels[0]
+: $settings?.title?.model ?? selectedModels[0];
+const titleModel = $models.find((model) => model.id === titleModelId);
-const generateChatTitle = async (_chatId, userPrompt) => {
-if ($settings.titleAutoGenerate ?? true) {
+console.log(titleModel);
 const title = await generateTitle(
 localStorage.token,
-$settings?.titleGenerationPrompt ??
+$settings?.title?.prompt ??
 $i18n.t(
 "Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
 ) + ' {{prompt}}',
-$settings?.titleAutoGenerateModel ?? selectedModels[0],
-userPrompt
+titleModelId,
+userPrompt,
+titleModel?.external ?? false
+? titleModel?.source?.toLowerCase() === 'litellm'
+? `${LITELLM_API_BASE_URL}/v1`
+: `${OPENAI_API_BASE_URL}`
+: `${OLLAMA_API_BASE_URL}/v1`
 );
-if (title) {
-await setChatTitle(_chatId, title);
-}
+return title;
 } else {
-await setChatTitle(_chatId, `${userPrompt}`);
+return `${userPrompt}`;
 }
 };
@@ -789,8 +812,10 @@
 title = _title;
 }
-chat = await updateChatById(localStorage.token, _chatId, { title: _title });
-await chats.set(await getChatList(localStorage.token));
+if ($settings.saveChatHistory ?? true) {
+chat = await updateChatById(localStorage.token, _chatId, { title: _title });
+await chats.set(await getChatList(localStorage.token));
+}
 };
 const getTags = async () => {
@@ -840,6 +865,8 @@
 <div class="min-h-screen max-h-screen w-full flex flex-col">
 <Navbar
 {title}
+bind:selectedModels
+bind:showModelSelector
 shareEnabled={messages.length > 0}
 initNewChat={async () => {
 if (currentRequestId !== null) {
@@ -864,15 +891,7 @@
 messagesContainerElement.clientHeight + 5;
 }}
 >
-<div
-class="{$settings?.fullScreenMode ?? null
-? 'max-w-full'
-: 'max-w-2xl md:px-0'} mx-auto w-full px-4"
->
-<ModelSelector bind:selectedModels />
-</div>
-<div class=" h-full w-full flex flex-col py-8">
+<div class=" h-full w-full flex flex-col py-4">
 <Messages
 chatId={$chatId}
 {selectedModels}
......
@@ -18,6 +18,7 @@
 import { splitStream } from '$lib/utils';
 import ChatCompletion from '$lib/components/playground/ChatCompletion.svelte';
+import Selector from '$lib/components/chat/ModelSelector/Selector.svelte';
 const i18n = getContext('i18n');
@@ -315,27 +316,24 @@
 </div>
 </div>
-<div class=" flex gap-1 px-1">
-<select
-id="models"
-class="outline-none bg-transparent text-sm font-medium rounded-lg w-full placeholder-gray-400"
-bind:value={selectedModelId}
->
-<option class=" text-gray-800" value="" selected disabled
->{$i18n.t('Select a model')}</option
->
-{#each $models as model}
-{#if model.name === 'hr'}
-<hr />
-{:else}
-<option value={model.id} class="text-gray-800 text-lg"
->{model.name +
-`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
->
-{/if}
-{/each}
-</select>
+<div class="flex flex-col gap-1 px-1 w-full">
+<div class="flex w-full">
+<div class="overflow-hidden w-full">
+<div class="max-w-full">
+<Selector
+placeholder={$i18n.t('Select a model')}
+items={$models
+.filter((model) => model.name !== 'hr')
+.map((model) => ({
+value: model.id,
+label: model.name,
+info: model
+}))}
+bind:value={selectedModelId}
+/>
+</div>
+</div>
+</div>
 <!-- <button
 class=" self-center dark:hover:text-gray-300"
......
@@ -11,7 +11,7 @@
 import '../tailwind.css';
 import 'tippy.js/dist/tippy.css';
 import { WEBUI_BASE_URL } from '$lib/constants';
-import i18n from '$lib/i18n';
+import i18n, { initI18n } from '$lib/i18n';
 setContext('i18n', i18n);
@@ -25,6 +25,11 @@
 if (backendConfig) {
 // Save Backend Status to Store
 await config.set(backendConfig);
+if ($config.default_locale) {
+initI18n($config.default_locale);
+} else {
+initI18n();
+}
 await WEBUI_NAME.set(backendConfig.name);
 console.log(backendConfig);
......
@@ -3,13 +3,14 @@
 @tailwind utilities;
 @layer base {
-html, pre {
+html,
+pre {
 font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
 Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
 }
-pre {
-white-space: pre-wrap;
-}
+pre {
+white-space: pre-wrap;
+}
 }
@@ -13,4 +13,4 @@
 "sizes": "844x884"
 }
 ]
-}
\ No newline at end of file
+}
@@ -16,6 +16,12 @@ const config = {
 assets: 'build',
 fallback: 'index.html'
 })
-}
+},
+onwarn: (warning, handler) => {
+const { code, _ } = warning;
+if (code === 'css-unused-selector') return;
+handler(warning);
+}
 };
......
@@ -16,9 +16,8 @@ export default {
 700: '#4e4e4e',
 800: '#333',
 850: '#262626',
-900: '#171717',
-950: '#0d0d0d'
+900: 'var(--color-gray-900, #171717)',
+950: 'var(--color-gray-950, #0d0d0d)'
 }
 },
 typography: {
......
+{
+"model_name": "string",
+"litellm_params": {
+"model": "ollama/mistral"
+}
+}
\ No newline at end of file