open-webui · Commit 02fb517b (Unverified)

Merge pull request #907 from open-webui/dev

0.1.103

Authored Feb 25, 2024 by Timothy Jaeryang Baek; committed by GitHub on Feb 25, 2024
Parents: 6676f0b4, 0bd3ec9e
Changes: 27 files. This page shows 7 changed files with 770 additions and 437 deletions (+770 -437).
src/lib/components/chat/Settings/Images.svelte    +29  -6
src/lib/components/chat/Settings/Models.svelte    +596 -265
src/lib/components/chat/SettingsModal.svelte      +21  -14
src/lib/constants.ts                              +2   -0
src/routes/(app)/+layout.svelte                   +23  -74
src/routes/(app)/+page.svelte                     +93  -78
test.json                                         +6   -0
src/lib/components/chat/Settings/Images.svelte  (+29 -6)

@@ -12,7 +12,9 @@
 		toggleImageGenerationEnabledStatus,
 		updateAUTOMATIC1111Url,
 		updateDefaultDiffusionModel,
-		updateImageSize
+		updateImageSize,
+		getImageSteps,
+		updateImageSteps
 	} from '$lib/apis/images';
 	import { getBackendConfig } from '$lib/apis';

 	const dispatch = createEventDispatcher();
@@ -21,20 +23,23 @@
 	let loading = false;

-	let enableImageGeneration = true;
+	let enableImageGeneration = false;
 	let AUTOMATIC1111_BASE_URL = '';

 	let selectedModel = '';
-	let models = [];
+	let models = null;

 	let imageSize = '';
+	let steps = 50;

 	const getModels = async () => {
 		models = await getDiffusionModels(localStorage.token).catch((error) => {
 			toast.error(error);
 			return null;
 		});
-		selectedModel = await getDefaultDiffusionModel(localStorage.token);
+		selectedModel = await getDefaultDiffusionModel(localStorage.token).catch((error) => {
+			return '';
+		});
 	};

 	const updateAUTOMATIC1111UrlHandler = async () => {
@@ -83,6 +88,7 @@
 		if (enableImageGeneration && AUTOMATIC1111_BASE_URL) {
 			imageSize = await getImageSize(localStorage.token);
+			steps = await getImageSteps(localStorage.token);
 			getModels();
 		}
 	}
@@ -98,12 +104,16 @@
 			toast.error(error);
 			return null;
 		});
+		await updateImageSteps(localStorage.token, steps).catch((error) => {
+			toast.error(error);
+			return null;
+		});

 		dispatch('save');
 		loading = false;
 	}}
 >
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[21rem]">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[20.5rem]">
 		<div>
 			<div class=" mb-1 text-sm font-medium">Image Settings</div>
@@ -188,7 +198,7 @@
 			{#if !selectedModel}
 				<option value="" disabled selected>Select a model</option>
 			{/if}
-			{#each models as model}
+			{#each models ?? [] as model}
 				<option value={model.title} class="bg-gray-100 dark:bg-gray-700"
 					>{model.model_name}</option
 				>
@@ -210,6 +220,19 @@
 				</div>
 			</div>
 		</div>
+		<div>
+			<div class=" mb-2.5 text-sm font-medium">Set Steps</div>
+			<div class="flex w-full">
+				<div class="flex-1 mr-2">
+					<input
+						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+						placeholder="Enter Number of Steps (e.g. 50)"
+						bind:value={steps}
+					/>
+				</div>
+			</div>
+		</div>
 	{/if}
 </div>
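The hunks above call getImageSteps and updateImageSteps from '$lib/apis/images', but that module's diff is not shown on this page. A minimal sketch of what the pair plausibly looks like, following the codebase's fetch-wrapper style; the IMAGES_API_BASE_URL constant, the endpoint paths, and the { steps } payload shape are all assumptions:

import { IMAGES_API_BASE_URL } from '$lib/constants'; // assumed constant, not in the hunks above

// Hypothetical sketch of the pair imported from '$lib/apis/images'.
export const getImageSteps = async (token: string = ''): Promise<number> => {
	const res = await fetch(`${IMAGES_API_BASE_URL}/steps`, {
		method: 'GET',
		headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` }
	});
	if (!res.ok) throw await res.json(); // surfaces to the caller's .catch()
	return (await res.json()).steps;
};

export const updateImageSteps = async (token: string = '', steps: number): Promise<number> => {
	const res = await fetch(`${IMAGES_API_BASE_URL}/steps/update`, {
		method: 'POST',
		headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
		body: JSON.stringify({ steps })
	});
	if (!res.ok) throw await res.json();
	return (await res.json()).steps;
};

Whatever the real bodies are, the component only relies on them throwing on failure, which is why both call sites attach a .catch() that falls back to a default.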
src/lib/components/chat/Settings/Models.svelte  (+596 -265)

This diff is collapsed.
src/lib/components/chat/SettingsModal.svelte  (+21 -14)

@@ -4,6 +4,7 @@
 	import { getOllamaModels } from '$lib/apis/ollama';
 	import { getOpenAIModels } from '$lib/apis/openai';
+	import { getLiteLLMModels } from '$lib/apis/litellm';

 	import Modal from '../common/Modal.svelte';
 	import Account from './Settings/Account.svelte';
@@ -27,23 +28,29 @@
 	let selectedTab = 'general';

-	const getModels = async (type = 'all') => {
-		const models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-		if (type === 'all') {
-			const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
-				console.log(error);
-				return null;
-			});
-			models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
-		}
+	const getModels = async () => {
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
+
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
 		return models;
 	};
 </script>
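The filter/reduce pair above is the interesting part of this rewrite: it drops providers whose fetch failed (each .catch() handler returns null) and joins the surviving model lists with an { name: 'hr' } marker between them, which the model dropdown renders as a divider. A standalone illustration of the same pattern; the sample model names are made up:

type Model = { name: string };

// One entry per provider; null means that provider's fetch rejected.
const providerResults: (Model[] | null)[] = [
	[{ name: 'llama2' }, { name: 'mistral' }], // Ollama
	null, // e.g. the OpenAI fetch failed and its .catch() returned null
	[{ name: 'gpt-3.5-turbo' }] // LiteLLM
];

// Drop failed providers, then concatenate the remaining lists, inserting an
// { name: 'hr' } separator between consecutive lists but not after the last.
const merged = providerResults
	.filter((models): models is Model[] => models !== null)
	.reduce<Model[]>(
		(a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])),
		[]
	);

console.log(merged.map((m) => m.name));
// -> ['llama2', 'mistral', 'hr', 'gpt-3.5-turbo']

Note the separator count adapts automatically: a single surviving provider produces no 'hr' entries at all, which the old push-based code needed explicit conditionals to achieve.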
src/lib/constants.ts  (+2 -0)

@@ -5,6 +5,8 @@ export const APP_NAME = 'Open WebUI';
 export const WEBUI_BASE_URL = dev ? `http://${location.hostname}:8080` : ``;

 export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;

+export const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;
+
 export const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama/api`;
 export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`;
 export const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/audio/api/v1`;
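The new constant mounts LiteLLM under the WebUI backend rather than pointing at a separate origin. A tiny sketch of how the template literals compose; `dev` mirrors SvelteKit's $app/environment flag, and 'localhost' is an illustrative hostname:

// Minimal sketch, not code from this commit.
const dev = true;
const hostname = 'localhost';

const WEBUI_BASE_URL = dev ? `http://${hostname}:8080` : ``;
const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;

console.log(LITELLM_API_BASE_URL);
// dev:  'http://localhost:8080/litellm/api'
// prod: '/litellm/api' (same-origin relative path, proxied by the backend)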
src/routes/(app)/+layout.svelte  (+23 -74)

@@ -11,6 +11,7 @@
 	import { getModelfiles } from '$lib/apis/modelfiles';
 	import { getPrompts } from '$lib/apis/prompts';
 	import { getOpenAIModels } from '$lib/apis/openai';
+	import { getLiteLLMModels } from '$lib/apis/litellm';

 	import { getDocs } from '$lib/apis/documents';
 	import { getAllChatTags } from '$lib/apis/chats';
@@ -43,24 +44,28 @@
 	let showShortcuts = false;

 	const getModels = async () => {
-		let models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-
-		// $settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1',
-		// $settings.OPENAI_API_KEY
-
-		const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
-			console.log(error);
-			return null;
-		});
-
-		models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
+
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));

 		return models;
 	};
@@ -117,8 +122,6 @@
 		await models.set(await getModels());
 	});

-	await setOllamaVersion();
-
 	document.addEventListener('keydown', function (event) {
 		const isCtrlPressed = event.ctrlKey || event.metaKey; // metaKey is for Cmd key on Mac
 		// Check if the Shift key is pressed
@@ -250,60 +253,6 @@
 				</div>
 			</div>
 		</div>
-	{:else if checkVersion(REQUIRED_OLLAMA_VERSION, ollamaVersion ?? '0')}
-		<div class="fixed w-full h-full flex z-50">
-			<div
-				class="absolute w-full h-full backdrop-blur-md bg-white/20 dark:bg-gray-900/50 flex justify-center"
-			>
-				<div class="m-auto pb-44 flex flex-col justify-center">
-					<div class="max-w-md">
-						<div class="text-center dark:text-white text-2xl font-medium z-50">
-							Connection Issue or Update Needed
-						</div>
-						<div class=" mt-4 text-center text-sm dark:text-gray-200 w-full">
-							Oops! It seems like your Ollama needs a little attention. <br
-								class=" hidden sm:flex"
-							/>We've detected either a connection hiccup or observed that you're using an older
-							version. Ensure you're on the latest Ollama version
-							<br class=" hidden sm:flex" />(version
-							<span class=" dark:text-white font-medium">{REQUIRED_OLLAMA_VERSION} or higher</span
-							>) or check your connection.
-							<div class="mt-1 text-sm">
-								Trouble accessing Ollama?
-								<a
-									class=" text-black dark:text-white font-semibold underline"
-									href="https://github.com/open-webui/open-webui#troubleshooting"
-									target="_blank"
-								>
-									Click here for help.
-								</a>
-							</div>
-						</div>
-						<div class=" mt-6 mx-auto relative group w-fit">
-							<button
-								class="relative z-20 flex px-5 py-2 rounded-full bg-white border border-gray-100 dark:border-none hover:bg-gray-100 transition font-medium text-sm"
-								on:click={async () => {
-									location.href = '/';
-									// await setOllamaVersion();
-								}}
-							>
-								Check Again
-							</button>
-							<button
-								class="text-xs text-center w-full mt-2 text-gray-400 underline"
-								on:click={async () => {
-									await setOllamaVersion(REQUIRED_OLLAMA_VERSION);
-								}}>Close</button
-							>
-						</div>
-					</div>
-				</div>
-			</div>
-		</div>
 	{:else if localDBChats.length > 0}
 		<div class="fixed w-full h-full flex z-50">
 			<div
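Both this file and SettingsModal.svelte now import getLiteLLMModels from '$lib/apis/litellm', a module added elsewhere in this release whose hunks are not on this page. A plausible sketch, assuming it queries LiteLLM's OpenAI-compatible /v1/models endpoint through the new proxy mount; the exact mapping is an assumption, though the `source` and `external` fields are consistent with how +page.svelte consumes them below:

import { LITELLM_API_BASE_URL } from '$lib/constants';

// Sketch only: the real module ships in this release but its diff is not
// shown on this page. Endpoint and field mapping are assumptions.
export const getLiteLLMModels = async (token: string = '') => {
	const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
		method: 'GET',
		headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` }
	});
	if (!res.ok) throw await res.json();

	const { data } = await res.json(); // OpenAI-style { data: [{ id, ... }] }
	return data.map((model: { id: string; name?: string }) => ({
		id: model.id,
		name: model.name ?? model.id,
		external: true, // routed through sendPromptOpenAI in +page.svelte
		source: 'litellm' // selects LITELLM_API_BASE_URL as the completion base URL
	}));
};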
src/routes/(app)/+page.svelte  (+93 -78)

@@ -36,6 +36,7 @@
 	import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import { RAGTemplate } from '$lib/utils/rag';
+	import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
 	import { WEBUI_BASE_URL } from '$lib/constants';

 	let stopResponseFlag = false;
@@ -132,6 +133,10 @@
 			selectedModels = [''];
 		}

+		selectedModels = selectedModels.map((modelId) =>
+			$models.map((m) => m.id).includes(modelId) ? modelId : ''
+		);
+
 		let _settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
 		settings.set({
 			..._settings
@@ -150,6 +155,10 @@
 	const submitPrompt = async (userPrompt, _user = null) => {
 		console.log('submitPrompt', $chatId);

+		selectedModels = selectedModels.map((modelId) =>
+			$models.map((m) => m.id).includes(modelId) ? modelId : ''
+		);
+
 		if (selectedModels.includes('')) {
 			toast.error('Model not selected');
 		} else if (messages.length != 0 && messages.at(-1).done != true) {
@@ -278,40 +287,41 @@
 		}

 		await Promise.all(
-			selectedModels.map(async (model) => {
-				console.log(model);
-				const modelTag = $models.filter((m) => m.name === model).at(0);
+			selectedModels.map(async (modelId) => {
+				const model = $models.filter((m) => m.id === modelId).at(0);

-				// Create response message
-				let responseMessageId = uuidv4();
-				let responseMessage = {
-					parentId: parentId,
-					id: responseMessageId,
-					childrenIds: [],
-					role: 'assistant',
-					content: '',
-					model: model,
-					timestamp: Math.floor(Date.now() / 1000) // Unix epoch
-				};
+				if (model) {
+					// Create response message
+					let responseMessageId = uuidv4();
+					let responseMessage = {
+						parentId: parentId,
+						id: responseMessageId,
+						childrenIds: [],
+						role: 'assistant',
+						content: '',
+						model: model.id,
+						timestamp: Math.floor(Date.now() / 1000) // Unix epoch
+					};

-				// Add message to history and Set currentId to messageId
-				history.messages[responseMessageId] = responseMessage;
-				history.currentId = responseMessageId;
+					// Add message to history and Set currentId to messageId
+					history.messages[responseMessageId] = responseMessage;
+					history.currentId = responseMessageId;

-				// Append messageId to childrenIds of parent message
-				if (parentId !== null) {
-					history.messages[parentId].childrenIds = [
-						...history.messages[parentId].childrenIds,
-						responseMessageId
-					];
-				}
+					// Append messageId to childrenIds of parent message
+					if (parentId !== null) {
+						history.messages[parentId].childrenIds = [
+							...history.messages[parentId].childrenIds,
+							responseMessageId
+						];
+					}

-				if (modelTag?.external) {
-					await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-				} else if (modelTag) {
-					await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+					if (model?.external) {
+						await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
+					} else if (model) {
+						await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+					}
 				} else {
-					toast.error(`Model ${model} not found`);
+					toast.error(`Model ${modelId} not found`);
 				}
 			})
 		);
@@ -320,6 +330,7 @@
 	};

 	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+		model = model.id;
 		const responseMessage = history.messages[responseMessageId];

 		// Wait until history/message have been updated
@@ -531,54 +542,58 @@
 		const responseMessage = history.messages[responseMessageId];
 		scrollToBottom();

-		const res = await generateOpenAIChatCompletion(localStorage.token, {
-			model: model,
-			stream: true,
-			messages: [
-				$settings.system
-					? {
-							role: 'system',
-							content: $settings.system
-					  }
-					: undefined,
-				...messages.filter((message) => !message.deleted)
-			]
-				.filter((message) => message)
-				.map((message, idx, arr) => ({
-					role: message.role,
-					...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
-						? {
-								content: [
-									{
-										type: 'text',
-										text:
-											arr.length - 1 !== idx
-												? message.content
-												: message?.raContent ?? message.content
-									},
-									...message.files
-										.filter((file) => file.type === 'image')
-										.map((file) => ({
-											type: 'image_url',
-											image_url: {
-												url: file.url
-											}
-										}))
-								]
-						  }
-						: {
-								content:
-									arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
-						  })
-				})),
-			seed: $settings?.options?.seed ?? undefined,
-			stop: $settings?.options?.stop ?? undefined,
-			temperature: $settings?.options?.temperature ?? undefined,
-			top_p: $settings?.options?.top_p ?? undefined,
-			num_ctx: $settings?.options?.num_ctx ?? undefined,
-			frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
-			max_tokens: $settings?.options?.num_predict ?? undefined
-		});
+		const res = await generateOpenAIChatCompletion(
+			localStorage.token,
+			{
+				model: model.id,
+				stream: true,
+				messages: [
+					$settings.system
+						? {
+								role: 'system',
+								content: $settings.system
+						  }
+						: undefined,
+					...messages.filter((message) => !message.deleted)
+				]
+					.filter((message) => message)
+					.map((message, idx, arr) => ({
+						role: message.role,
+						...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+							? {
+									content: [
+										{
+											type: 'text',
+											text:
+												arr.length - 1 !== idx
+													? message.content
+													: message?.raContent ?? message.content
+										},
+										...message.files
+											.filter((file) => file.type === 'image')
+											.map((file) => ({
+												type: 'image_url',
+												image_url: {
+													url: file.url
+												}
+											}))
+									]
+							  }
+							: {
+									content:
+										arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
+							  })
+					})),
+				seed: $settings?.options?.seed ?? undefined,
+				stop: $settings?.options?.stop ?? undefined,
+				temperature: $settings?.options?.temperature ?? undefined,
+				top_p: $settings?.options?.top_p ?? undefined,
+				num_ctx: $settings?.options?.num_ctx ?? undefined,
+				frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
+				max_tokens: $settings?.options?.num_predict ?? undefined
+			},
+			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+		);

 		if (res && res.ok) {
 			const reader = res.body
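The last hunk changes generateOpenAIChatCompletion from a two-argument to a three-argument call: the new third argument is the OpenAI-compatible base URL, chosen per model so LiteLLM-backed models stream through the backend's /litellm/api mount while everything else keeps using the OpenAI proxy route. The client helper in '$lib/apis/openai' is not shown on this page, but a sketch of the updated signature the call site implies; the default parameter value and the '/chat/completions' path are assumptions:

import { OPENAI_API_BASE_URL } from '$lib/constants';

// Sketch: the real helper lives in src/lib/apis/openai and is not shown here.
// The third parameter is implied by the call site above.
export const generateOpenAIChatCompletion = async (
	token: string = '',
	body: object,
	url: string = OPENAI_API_BASE_URL // assumed default
): Promise<Response | null> => {
	const res = await fetch(`${url}/chat/completions`, {
		method: 'POST',
		headers: {
			Authorization: `Bearer ${token}`,
			'Content-Type': 'application/json'
		},
		body: JSON.stringify(body)
	}).catch((err) => {
		console.log(err);
		return null;
	});
	// Callers check `res && res.ok`, then stream tokens from res.body.
	return res;
};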
test.json  (new file, mode 100644, +6 -0)

{
  "model_name": "string",
  "litellm_params": {
    "model": "ollama/mistral"
  }
}
\ No newline at end of file
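This JSON matches the request-body shape of LiteLLM proxy's model-management API, so the file reads like a scratch test payload that landed in the merge. A hedged usage sketch; the '/model/new' route follows LiteLLM's proxy docs, and the base URL and auth header are assumptions tied to the /litellm/api mount added in constants.ts:

import { LITELLM_API_BASE_URL } from '$lib/constants';

// Sketch: register a model with the embedded LiteLLM proxy using the
// payload shape from test.json. Route and auth scheme are assumptions.
const token = localStorage.token;
const payload = {
	model_name: 'mistral', // display name exposed to clients
	litellm_params: {
		model: 'ollama/mistral' // provider/model identifier LiteLLM dispatches to
	}
};

const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
	method: 'POST',
	headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
	body: JSON.stringify(payload)
});
console.log(await res.json());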