chenpangpang / open-webui · Commits

Commit 6b0eae9f (unverified)
Authored Feb 25, 2024 by Timothy Jaeryang Baek; committed by GitHub, Feb 25, 2024

Merge pull request #915 from open-webui/dev

fix: continue generation

Parents: 588daace, 1ff0c9a9

Showing 3 changed files with 122 additions and 163 deletions (+122 −163)
src/lib/components/chat/Messages.svelte      +0   −46
src/routes/(app)/+page.svelte                +19  −18
src/routes/(app)/c/[id]/+page.svelte         +103 −99
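The two route components converge on the same fix: resolve the selected model by its id (the previous code matched on its display name), and only then dispatch the request to the OpenAI-compatible or Ollama sender; Messages.svelte only drops its inline processing spinner. A minimal sketch of that lookup-and-dispatch flow, assuming a hypothetical resolveAndSend helper and sender callbacks that merely stand in for the component code shown below:

// Illustrative sketch only; resolveAndSend and the `send` callbacks are
// hypothetical names, not identifiers from the diff.
const resolveAndSend = async (models, modelId, prompt, send) => {
  // Look the model up by id instead of by display name.
  const model = models.filter((m) => m.id === modelId).at(0);

  if (model) {
    if (model.external) {
      await send.openai(model, prompt); // OpenAI-compatible backends (including LiteLLM)
    } else {
      await send.ollama(model, prompt); // local Ollama models
    }
  } else {
    // Surface a clear error instead of failing silently.
    console.error(`Model ${modelId} not found`);
  }
};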
src/lib/components/chat/Messages.svelte (view file @ 6b0eae9f)
...
...
@@ -281,52 +281,6 @@
{showNextMessage}
{copyToClipboard}
/>
{#if messages.length - 1 === messageIdx && processing !== ''}
<div class="flex my-2.5 ml-12 items-center w-fit space-x-2.5">
<div class=" dark:text-blue-100">
<svg
class=" w-4 h-4 translate-y-[0.5px]"
fill="currentColor"
viewBox="0 0 24 24"
xmlns="http://www.w3.org/2000/svg"
><style>
.spinner_qM83 {
animation: spinner_8HQG 1.05s infinite;
}
.spinner_oXPr {
animation-delay: 0.1s;
}
.spinner_ZTLf {
animation-delay: 0.2s;
}
@keyframes spinner_8HQG {
0%,
57.14% {
animation-timing-function: cubic-bezier(0.33, 0.66, 0.66, 1);
transform: translate(0);
}
28.57% {
animation-timing-function: cubic-bezier(0.33, 0, 0.66, 0.33);
transform: translateY(-6px);
}
100% {
transform: translate(0);
}
}
</style><circle class="spinner_qM83" cx="4" cy="12" r="2.5" /><circle
class="spinner_qM83 spinner_oXPr"
cx="12"
cy="12"
r="2.5"
/><circle class="spinner_qM83 spinner_ZTLf" cx="20" cy="12" r="2.5" /></svg
>
</div>
<div class=" text-sm font-medium">
{processing}
</div>
</div>
{/if}
{:else}
<ResponseMessage
{message}
...
...
src/routes/(app)/+page.svelte (view file @ 6b0eae9f)
...
...
@@ -732,25 +732,26 @@
       responseMessage.done = false;
       await tick();

-      const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
-
-      if (modelTag?.external) {
-        await sendPromptOpenAI(
-          responseMessage.model,
-          history.messages[responseMessage.parentId].content,
-          responseMessage.id,
-          _chatId
-        );
-      } else if (modelTag) {
-        await sendPromptOllama(
-          responseMessage.model,
-          history.messages[responseMessage.parentId].content,
-          responseMessage.id,
-          _chatId
-        );
-      } else {
-        toast.error(`Model ${model} not found`);
-      }
+      const model = $models.filter((m) => m.id === responseMessage.model).at(0);
+
+      if (model) {
+        if (model?.external) {
+          await sendPromptOpenAI(
+            model,
+            history.messages[responseMessage.parentId].content,
+            responseMessage.id,
+            _chatId
+          );
+        } else
+          await sendPromptOllama(
+            model,
+            history.messages[responseMessage.parentId].content,
+            responseMessage.id,
+            _chatId
+          );
+      }
+    } else {
+      toast.error(`Model ${modelId} not found`);
     }
   };
...
...
src/routes/(app)/c/[id]/+page.svelte (view file @ 6b0eae9f)
...
...
@@ -238,7 +238,6 @@
      await sendPrompt(userPrompt, userMessageId);
    }
  };

  const sendPrompt = async (prompt, parentId) => {
    const _chatId = JSON.parse(JSON.stringify($chatId));
...
@@ -292,40 +291,41 @@
     }

     await Promise.all(
-      selectedModels.map(async (model) => {
-        console.log(model);
-        const modelTag = $models.filter((m) => m.name === model).at(0);
-
-        // Create response message
-        let responseMessageId = uuidv4();
-        let responseMessage = {
-          parentId: parentId,
-          id: responseMessageId,
-          childrenIds: [],
-          role: 'assistant',
-          content: '',
-          model: model,
-          timestamp: Math.floor(Date.now() / 1000) // Unix epoch
-        };
-
-        // Add message to history and Set currentId to messageId
-        history.messages[responseMessageId] = responseMessage;
-        history.currentId = responseMessageId;
-
-        // Append messageId to childrenIds of parent message
-        if (parentId !== null) {
-          history.messages[parentId].childrenIds = [
-            ...history.messages[parentId].childrenIds,
-            responseMessageId
-          ];
-        }
-
-        if (modelTag?.external) {
-          await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-        } else if (modelTag) {
-          await sendPromptOllama(model, prompt, responseMessageId, _chatId);
-        } else {
-          toast.error(`Model ${model} not found`);
-        }
+      selectedModels.map(async (modelId) => {
+        const model = $models.filter((m) => m.id === modelId).at(0);
+
+        if (model) {
+          // Create response message
+          let responseMessageId = uuidv4();
+          let responseMessage = {
+            parentId: parentId,
+            id: responseMessageId,
+            childrenIds: [],
+            role: 'assistant',
+            content: '',
+            model: model.id,
+            timestamp: Math.floor(Date.now() / 1000) // Unix epoch
+          };
+
+          // Add message to history and Set currentId to messageId
+          history.messages[responseMessageId] = responseMessage;
+          history.currentId = responseMessageId;
+
+          // Append messageId to childrenIds of parent message
+          if (parentId !== null) {
+            history.messages[parentId].childrenIds = [
+              ...history.messages[parentId].childrenIds,
+              responseMessageId
+            ];
+          }
+
+          if (model?.external) {
+            await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
+          } else if (model) {
+            await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+          }
+        } else {
+          toast.error(`Model ${modelId} not found`);
+        }
       })
     );
...
...
@@ -334,6 +334,7 @@
   };

   const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+    model = model.id;
     const responseMessage = history.messages[responseMessageId];

     // Wait until history/message have been updated
...
...
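The added `model = model.id;` line is a small shim: sendPrompt now hands sendPromptOllama the full model object, while the rest of the function still works with the plain model id string. A hedged sketch of that normalization, using a hypothetical toModelId helper that is not part of the diff:

// Hypothetical helper, not from the diff: accept either a model object
// ({ id, name, external, ... }) or a bare id string and return the id
// string the downstream Ollama request expects.
const toModelId = (model) => (typeof model === 'string' ? model : model.id);

// Usage sketch: toModelId({ id: 'llama2:latest', external: false }) -> 'llama2:latest'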
@@ -543,57 +544,60 @@
   const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
     const responseMessage = history.messages[responseMessageId];

     scrollToBottom();

-    const res = await generateOpenAIChatCompletion(localStorage.token, {
-      model: model,
-      stream: true,
-      messages: [
-        $settings.system
-          ? {
-              role: 'system',
-              content: $settings.system
-            }
-          : undefined,
-        ...messages.filter((message) => !message.deleted)
-      ]
-        .filter((message) => message)
-        .map((message, idx, arr) => ({
-          role: message.role,
-          ...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
-            ? {
-                content: [
-                  {
-                    type: 'text',
-                    text:
-                      arr.length - 1 !== idx
-                        ? message.content
-                        : message?.raContent ?? message.content
-                  },
-                  ...message.files
-                    .filter((file) => file.type === 'image')
-                    .map((file) => ({
-                      type: 'image_url',
-                      image_url: {
-                        url: file.url
-                      }
-                    }))
-                ]
-              }
-            : {
-                content:
-                  arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
-              })
-        })),
-      seed: $settings?.options?.seed ?? undefined,
-      stop: $settings?.options?.stop ?? undefined,
-      temperature: $settings?.options?.temperature ?? undefined,
-      top_p: $settings?.options?.top_p ?? undefined,
-      num_ctx: $settings?.options?.num_ctx ?? undefined,
-      frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
-      max_tokens: $settings?.options?.num_predict ?? undefined
-    });
+    const res = await generateOpenAIChatCompletion(
+      localStorage.token,
+      {
+        model: model.id,
+        stream: true,
+        messages: [
+          $settings.system
+            ? {
+                role: 'system',
+                content: $settings.system
+              }
+            : undefined,
+          ...messages.filter((message) => !message.deleted)
+        ]
+          .filter((message) => message)
+          .map((message, idx, arr) => ({
+            role: message.role,
+            ...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+              ? {
+                  content: [
+                    {
+                      type: 'text',
+                      text:
+                        arr.length - 1 !== idx
+                          ? message.content
+                          : message?.raContent ?? message.content
+                    },
+                    ...message.files
+                      .filter((file) => file.type === 'image')
+                      .map((file) => ({
+                        type: 'image_url',
+                        image_url: {
+                          url: file.url
+                        }
+                      }))
+                  ]
+                }
+              : {
+                  content:
+                    arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
+                })
+          })),
+        seed: $settings?.options?.seed ?? undefined,
+        stop: $settings?.options?.stop ?? undefined,
+        temperature: $settings?.options?.temperature ?? undefined,
+        top_p: $settings?.options?.top_p ?? undefined,
+        num_ctx: $settings?.options?.num_ctx ?? undefined,
+        frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
+        max_tokens: $settings?.options?.num_predict ?? undefined
+      },
+      model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+    );

     if (res && res.ok) {
       const reader = res.body
...
...
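New in the call above is the third argument: the completion request is now routed to a per-model base URL instead of a single fixed endpoint. A small hedged sketch of that selection (LITELLM_API_BASE_URL and OPENAI_API_BASE_URL are the constants referenced in the diff; pickBaseUrl is a hypothetical helper):

// Hypothetical helper mirroring the base-URL selection in the added call above.
const pickBaseUrl = (model, LITELLM_API_BASE_URL, OPENAI_API_BASE_URL) =>
  model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`;

// Usage sketch with placeholder URLs:
// pickBaseUrl({ source: 'litellm' }, 'https://litellm.example', 'https://openai.example')
// -> 'https://litellm.example/v1'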
@@ -704,7 +708,6 @@
        await setChatTitle(_chatId, userPrompt);
      }
    };

  const stopResponse = () => {
    stopResponseFlag = true;
    console.log('stopResponse');
...
...
@@ -719,25 +722,26 @@
       responseMessage.done = false;
       await tick();

-      const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
-
-      if (modelTag?.external) {
-        await sendPromptOpenAI(
-          responseMessage.model,
-          history.messages[responseMessage.parentId].content,
-          responseMessage.id,
-          _chatId
-        );
-      } else if (modelTag) {
-        await sendPromptOllama(
-          responseMessage.model,
-          history.messages[responseMessage.parentId].content,
-          responseMessage.id,
-          _chatId
-        );
-      } else {
-        toast.error(`Model ${model} not found`);
-      }
+      const model = $models.filter((m) => m.id === responseMessage.model).at(0);
+
+      if (model) {
+        if (model?.external) {
+          await sendPromptOpenAI(
+            model,
+            history.messages[responseMessage.parentId].content,
+            responseMessage.id,
+            _chatId
+          );
+        } else
+          await sendPromptOllama(
+            model,
+            history.messages[responseMessage.parentId].content,
+            responseMessage.id,
+            _chatId
+          );
+      }
+    } else {
+      toast.error(`Model ${modelId} not found`);
     }
   };
...
...