open-webui · Commit 1a9a56d6 (unverified)

Authored Feb 24, 2024 by Timothy Jaeryang Baek; committed via GitHub on Feb 24, 2024.

Merge pull request #844 from open-webui/litellm

feat: direct litellm integration

Parents: 437d7ff6, ec6f53e2

Changes: 22 files changed in the commit; this page shows 2 of them, with 99 additions and 78 deletions:

    src/routes/(app)/+page.svelte   +93 −78
    test.json                       +6  −0
src/routes/(app)/+page.svelte
@@ -36,6 +36,7 @@
 	import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import { RAGTemplate } from '$lib/utils/rag';
+	import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
 	import { WEBUI_BASE_URL } from '$lib/constants';

 	let stopResponseFlag = false;
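This one-line addition is the hook for the whole integration: alongside the existing WEBUI_BASE_URL, the page now imports both LITELLM_API_BASE_URL and OPENAI_API_BASE_URL so the OpenAI-compatible request path (see the last hunk in this file) can target either the bundled LiteLLM proxy or a regular OpenAI-style endpoint. The constants themselves are not part of this diff; a plausible shape, assuming both proxies are mounted under the web UI's own base URL:

	// Assumed shape of the imported constants (not shown in this commit).
	export const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;
	export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`;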
@@ -132,6 +133,10 @@
 			selectedModels = [''];
 		}
 	}

+	selectedModels = selectedModels.map((modelId) =>
+		$models.map((m) => m.id).includes(modelId) ? modelId : ''
+	);
+
 	let _settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
 	settings.set({
 		..._settings
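The added lines sanitize the restored selection: any id that no longer exists in the $models store collapses to '', which the submit path then rejects with a "Model not selected" toast. A plain-JS sketch of the same pattern, with sample data that is not from the repo:

	// Sample data; only the mapping pattern comes from the diff.
	const models = [{ id: 'mistral:latest' }, { id: 'gpt-4' }];
	let selectedModels = ['mistral:latest', 'removed-model'];

	selectedModels = selectedModels.map((modelId) =>
		models.map((m) => m.id).includes(modelId) ? modelId : ''
	);
	// -> ['mistral:latest', '']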
@@ -150,6 +155,10 @@
 	const submitPrompt = async (userPrompt, _user = null) => {
 		console.log('submitPrompt', $chatId);

+		selectedModels = selectedModels.map((modelId) =>
+			$models.map((m) => m.id).includes(modelId) ? modelId : ''
+		);
+
 		if (selectedModels.includes('')) {
 			toast.error('Model not selected');
 		} else if (messages.length != 0 && messages.at(-1).done != true) {
@@ -278,10 +287,10 @@
 		}

 		await Promise.all(
-			selectedModels.map(async (model) => {
-				console.log(model);
-				const modelTag = $models.filter((m) => m.name === model).at(0);
+			selectedModels.map(async (modelId) => {
+				const model = $models.filter((m) => m.id === modelId).at(0);
+
 				if (model) {
 					// Create response message
 					let responseMessageId = uuidv4();
 					let responseMessage = {
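Previously the callback received the model name/id as model and fetched a separate modelTag by name; now it receives the raw modelId and resolves the full model record once, so the branches below can read fields such as external and source directly. A sketch of the lookup with sample data (the field values are assumptions inferred from how the diff uses them):

	// Sample records; the filter(...).at(0) idiom is from the diff.
	const models = [
		{ id: 'gpt-4', external: true, source: 'openai' },
		{ id: 'mistral:latest', external: false }
	];
	const model = models.filter((m) => m.id === 'gpt-4').at(0);
	// model.external === true -> handled by sendPromptOpenAI below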
@@ -290,7 +299,7 @@
 						childrenIds: [],
 						role: 'assistant',
 						content: '',
-						model: model,
+						model: model.id,
 						timestamp: Math.floor(Date.now() / 1000) // Unix epoch
 					};
@@ -306,12 +315,13 @@
...
@@ -306,12 +315,13 @@
];
];
}
}
if (model
Tag
?.external) {
if
(
model
?.
external
)
{
await
sendPromptOpenAI
(
model
,
prompt
,
responseMessageId
,
_chatId
);
await
sendPromptOpenAI
(
model
,
prompt
,
responseMessageId
,
_chatId
);
} else if (model
Tag
) {
}
else
if
(
model
)
{
await
sendPromptOllama
(
model
,
prompt
,
responseMessageId
,
_chatId
);
await
sendPromptOllama
(
model
,
prompt
,
responseMessageId
,
_chatId
);
}
}
else
{
}
else
{
toast.error(`Model ${model} not found`);
toast
.
error
(`
Model
${
model
Id
}
not
found
`);
}
}
})
})
);
);
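Note the error branch switching from ${model} to ${modelId}: under the old names model was the id string, but after the rename model is the looked-up record, and it is undefined exactly when this branch runs, so only modelId is usable in the message. A minimal sketch of the failure path:

	const models = [{ id: 'mistral:latest' }];
	const modelId = 'gpt-4';
	const model = models.filter((m) => m.id === modelId).at(0); // undefined
	if (!model) {
		// the component calls toast.error(...) here
		console.error(`Model ${modelId} not found`);
	}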
@@ -320,6 +330,7 @@
 	};

 	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+		model = model.id;
 		const responseMessage = history.messages[responseMessageId];

 		// Wait until history/message have been updated
@@ -531,8 +542,10 @@
 		const responseMessage = history.messages[responseMessageId];
 		scrollToBottom();

-		const res = await generateOpenAIChatCompletion(localStorage.token, {
-			model: model,
+		const res = await generateOpenAIChatCompletion(
+			localStorage.token,
+			{
+				model: model.id,
 			stream: true,
 			messages: [
 				$settings.system
@@ -578,7 +591,9 @@
...
@@ -578,7 +591,9 @@
num_ctx
:
$
settings
?.
options
?.
num_ctx
??
undefined
,
num_ctx
:
$
settings
?.
options
?.
num_ctx
??
undefined
,
frequency_penalty
:
$
settings
?.
options
?.
repeat_penalty
??
undefined
,
frequency_penalty
:
$
settings
?.
options
?.
repeat_penalty
??
undefined
,
max_tokens
:
$
settings
?.
options
?.
num_predict
??
undefined
max_tokens
:
$
settings
?.
options
?.
num_predict
??
undefined
});
},
model
.
source
===
'litellm'
?
`${
LITELLM_API_BASE_URL
}/
v1
`
:
`${
OPENAI_API_BASE_URL
}`
);
if
(
res
&&
res
.
ok
)
{
if
(
res
&&
res
.
ok
)
{
const
reader
=
res
.
body
const
reader
=
res
.
body
...
...
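This is where the imported constants pay off: generateOpenAIChatCompletion now takes the request body and an explicit base URL as separate arguments, and models whose source is 'litellm' are routed to the LiteLLM proxy's OpenAI-compatible /v1 surface instead of the default OpenAI base. The helper's body is not in this diff; a hedged sketch of a wrapper with that signature:

	// Assumed implementation sketch; the real helper lives in $lib/apis and may differ.
	export const generateOpenAIChatCompletion = async (token, body, url = OPENAI_API_BASE_URL) => {
		return await fetch(`${url}/chat/completions`, {
			method: 'POST',
			headers: {
				Authorization: `Bearer ${token}`,
				'Content-Type': 'application/json'
			},
			body: JSON.stringify(body)
		}).catch((err) => {
			console.log(err);
			return null;
		});
	};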
test.json (new file, mode 100644)

{
	"model_name": "string",
	"litellm_params": {
		"model": "ollama/mistral"
	}
}
\ No newline at end of file
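The new test.json matches the payload shape LiteLLM uses to describe a model: a public-facing model_name plus litellm_params whose model field carries the provider-prefixed route (ollama/mistral). Nothing in the two files shown here wires it up, so it reads as a test fixture; purely as an assumption about intended use, such a payload could register a model with a running proxy:

	// Assumed usage; the /model/new route and its wiring are not part of this diff.
	import { LITELLM_API_BASE_URL } from '$lib/constants';

	const payload = {
		model_name: 'string',
		litellm_params: { model: 'ollama/mistral' }
	};

	await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		body: JSON.stringify(payload)
	});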