chenpangpang / open-webui · Commits

Commit cb8c45d8, authored May 31, 2024 by Timothy J. Baek
Parent: fc2b314c

fix: pipelines
Showing 4 changed files with 11 additions and 8 deletions (+11 -8):

    backend/apps/openai/main.py             +0 -5
    backend/main.py                         +6 -2
    src/lib/apis/openai/index.ts            +4 -1
    src/lib/components/chat/Chat.svelte     +1 -0
backend/apps/openai/main.py

@@ -400,11 +400,6 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
         if "pipeline" in model and model.get("pipeline"):
             payload["user"] = {"name": user.name, "id": user.id}
-            payload["title"] = (
-                True
-                if payload["stream"] == False and payload["max_tokens"] == 50
-                else False
-            )
 
         # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
         # This is a workaround until OpenAI fixes the issue with this model
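The removed block guessed that a request was a title-generation call from its shape: `stream` false and `max_tokens` equal to 50. With this commit that flag travels with the request instead (the `title: true` field added to `generateTitle` further down), so the proxy no longer has to infer it. A minimal sketch of the two approaches; the function names and the sample payload are illustrative, not taken from the repository:

# Heuristic removed by this commit: infer a title request from the payload shape.
# (Sketch only; names and sample data are hypothetical.)
def is_title_request_old(payload: dict) -> bool:
    return payload.get("stream") is False and payload.get("max_tokens") == 50


# After the commit the client marks the request explicitly via a "title" field.
def is_title_request_new(payload: dict) -> bool:
    return bool(payload.get("title"))


payload = {"stream": False, "max_tokens": 50, "title": True}
assert is_title_request_old(payload)
assert is_title_request_new(payload)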
backend/main.py

@@ -315,8 +315,12 @@ class PipelineMiddleware(BaseHTTPMiddleware):
             else:
                 pass
 
-            if "chat_id" in data:
-                del data["chat_id"]
+            if "pipeline" not in app.state.MODELS[model_id]:
+                if "chat_id" in data:
+                    del data["chat_id"]
+
+                if "title" in data:
+                    del data["title"]
 
             modified_body_bytes = json.dumps(data).encode("utf-8")
             # Replace the request body with the modified one
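Previously the middleware stripped `chat_id` from every request body; now it strips both `chat_id` and `title`, but only when the target model is not a pipeline, so pipeline models still receive those fields. A standalone sketch of that rule, assuming `models` maps a model id to its metadata dict (the sample entries below are hypothetical):

import json


def scrub_body(data: dict, model_id: str, models: dict) -> bytes:
    # Sketch of the new rule: plain models never see the bookkeeping fields,
    # while pipeline models keep them so they can act on chat_id / title.
    data = dict(data)
    if "pipeline" not in models[model_id]:
        data.pop("chat_id", None)
        data.pop("title", None)
    return json.dumps(data).encode("utf-8")


models = {"gpt-4o": {}, "rate-limit-filter": {"pipeline": {"type": "filter"}}}
body = {"model": "gpt-4o", "chat_id": "abc123", "title": True}

print(scrub_body(body, "gpt-4o", models))             # fields removed
print(scrub_body(body, "rate-limit-filter", models))  # fields kept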
src/lib/apis/openai/index.ts

@@ -336,6 +336,7 @@ export const generateTitle = async (
 	template: string,
 	model: string,
 	prompt: string,
+	chat_id?: string,
 	url: string = OPENAI_API_BASE_URL
 ) => {
 	let error = null;

@@ -361,7 +362,9 @@ export const generateTitle = async (
 			],
 			stream: false,
 			// Restricting the max tokens to 50 to avoid long titles
-			max_tokens: 50
+			max_tokens: 50,
+			...(chat_id && { chat_id: chat_id }),
+			title: true
 		})
 	})
 		.then(async (res) => {
src/lib/components/chat/Chat.svelte

@@ -1118,6 +1118,7 @@
 				) + ' {{prompt}}',
 				titleModelId,
 				userPrompt,
+				$chatId,
 				titleModel?.owned_by === 'openai' ?? false
 					? `${OPENAI_API_BASE_URL}`
 					: `${OLLAMA_API_BASE_URL}/v1`