chenpangpang / open-webui / Commits

Commit 6c58bb59, authored Mar 08, 2024 by Timothy J. Baek

feat: rag docs as payload field

parent c49491e5
Showing 3 changed files with 25 additions and 53 deletions:

backend/main.py (+0, -2)
src/routes/(app)/+page.svelte (+4, -2)
src/routes/(app)/c/[id]/+page.svelte (+21, -49)
backend/main.py
@@ -123,8 +123,6 @@ class RAGMiddleware(BaseHTTPMiddleware):
                 data["messages"][last_user_message_idx] = new_user_message
                 del data["docs"]
 
-                print("DATAAAAAAAAAAAAAAAAAA")
-                print(data)
 
                 modified_body_bytes = json.dumps(data).encode("utf-8")
                 # Create a new request with the modified body
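For orientation: the surrounding RAGMiddleware (context elided by the diff) rewrites the incoming request body, folding retrieved document context into the last user message, deleting the client-supplied "docs" field, and re-encoding the JSON, which is what makes "rag docs as payload field" work end to end. Below is a minimal sketch of that request-rewriting pattern, assuming Starlette's BaseHTTPMiddleware; the retrieval/injection step is only hinted at in the hunk above, so it is left as a comment here, and this is not the author's exact implementation.

import json

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request


class RAGMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST":
            body = await request.body()
            data = json.loads(body) if body else {}
            if "docs" in data:
                # Elided in the hunk above: retrieve context for data["docs"]
                # and rewrite the last user message with it, then strip the
                # field so the upstream model API never sees it.
                del data["docs"]
                modified_body_bytes = json.dumps(data).encode("utf-8")

                # Create a new request with the modified body, as the original
                # comment describes: same scope, replacement receive channel.
                async def receive():
                    return {
                        "type": "http.request",
                        "body": modified_body_bytes,
                        "more_body": False,
                    }

                request = Request(request.scope, receive)
        return await call_next(request)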
src/routes/(app)/+page.svelte
@@ -336,7 +336,7 @@
 				},
 				format: $settings.requestFormat ?? undefined,
 				keep_alive: $settings.keepAlive ?? undefined,
-				docs: docs
+				docs: docs.length > 0 ? docs : undefined
 			});
 
 			if (res && res.ok) {
@@ -503,6 +503,8 @@
 			)
 			.flat(1);
 
+		console.log(docs);
+
 		const res = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
@@ -552,7 +554,7 @@
 				num_ctx: $settings?.options?.num_ctx ?? undefined,
 				frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
 				max_tokens: $settings?.options?.num_predict ?? undefined,
-				docs: docs
+				docs: docs.length > 0 ? docs : undefined
 			},
 			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
 		);
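Both hunks in this file apply the same guard, docs.length > 0 ? docs : undefined, so the field is serialized only when the user actually attached documents; JSON.stringify drops undefined values otherwise. As a hedged illustration, here is roughly what the backend middleware would decode from such a request. The field names come from the hunks above; the concrete model, message, and collection values are made up.

import json

# Illustrative completion payload after this change. "docs" is present only
# when attachments exist; the item shapes mirror the 'doc' / 'collection'
# filters in the frontend code. All values below are hypothetical.
payload = {
    "model": "llama2:latest",
    "messages": [{"role": "user", "content": "Summarize the attached report."}],
    "docs": [
        {"type": "doc", "collection_name": "report-2024"},
        {"type": "collection", "collection_names": ["notes", "faq"]},
    ],
}

body = json.dumps(payload).encode("utf-8")  # what RAGMiddleware reads back
data = json.loads(body)
print("docs" in data)  # True only for requests carrying attachments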
src/routes/(app)/c/[id]/+page.svelte
@@ -245,53 +245,6 @@
 	const sendPrompt = async (prompt, parentId) => {
 		const _chatId = JSON.parse(JSON.stringify($chatId));
 
-		const docs = messages
-			.filter((message) => message?.files ?? null)
-			.map((message) =>
-				message.files.filter((item) => item.type === 'doc' || item.type === 'collection')
-			)
-			.flat(1);
-
-		console.log(docs);
-		if (docs.length > 0) {
-			processing = 'Reading';
-			const query = history.messages[parentId].content;
-
-			let relevantContexts = await Promise.all(
-				docs.map(async (doc) => {
-					if (doc.type === 'collection') {
-						return await queryCollection(localStorage.token, doc.collection_names, query).catch(
-							(error) => {
-								console.log(error);
-								return null;
-							}
-						);
-					} else {
-						return await queryDoc(localStorage.token, doc.collection_name, query).catch((error) => {
-							console.log(error);
-							return null;
-						});
-					}
-				})
-			);
-			relevantContexts = relevantContexts.filter((context) => context);
-
-			const contextString = relevantContexts.reduce((a, context, i, arr) => {
-				return `${a}${context.documents.join(' ')}\n`;
-			}, '');
-
-			console.log(contextString);
-
-			history.messages[parentId].raContent = await RAGTemplate(
-				localStorage.token,
-				contextString,
-				query
-			);
-			history.messages[parentId].contexts = relevantContexts;
-			await tick();
-			processing = '';
-		}
-
 		await Promise.all(
 			selectedModels.map(async (modelId) => {
 				const model = $models.filter((m) => m.id === modelId).at(0);
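The block removed above is the old client-side retrieval path: it queried each attached doc or collection, dropped failed lookups, and reduced the hits into one context string for RAGTemplate. With docs now shipped in the payload, the backend presumably runs the equivalent step inside RAGMiddleware. A minimal Python sketch of that retrieve-and-concatenate logic follows; query_doc and query_collection are hypothetical stand-ins for whatever retrieval functions the backend exposes, not confirmed API.

import asyncio


async def query_collection(token: str, collection_names: list[str], query: str) -> dict:
    # Hypothetical stand-in for multi-collection retrieval.
    return {"documents": [f"hit from {name}" for name in collection_names]}


async def query_doc(token: str, collection_name: str, query: str) -> dict:
    # Hypothetical stand-in for single-document retrieval.
    return {"documents": [f"hit from {collection_name}"]}


async def build_context(token: str, docs: list[dict], query: str) -> str:
    async def fetch(doc: dict):
        try:
            if doc["type"] == "collection":
                return await query_collection(token, doc["collection_names"], query)
            return await query_doc(token, doc["collection_name"], query)
        except Exception as err:  # mirrors the .catch((error) => null) above
            print(err)
            return None

    # Drop failed lookups, like relevantContexts.filter((context) => context).
    contexts = [c for c in await asyncio.gather(*(fetch(d) for d in docs)) if c]
    # Mirrors the removed reduce(): each context's documents joined by spaces,
    # one context per line.
    return "".join(" ".join(c["documents"]) + "\n" for c in contexts)


if __name__ == "__main__":
    docs = [{"type": "doc", "collection_name": "report-2024"}]
    print(asyncio.run(build_context("token", docs, "What does the report say?")))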
@@ -381,6 +334,13 @@
 			}
 		});
 
+		const docs = messages
+			.filter((message) => message?.files ?? null)
+			.map((message) =>
+				message.files.filter((item) => item.type === 'doc' || item.type === 'collection')
+			)
+			.flat(1);
+
 		const [res, controller] = await generateChatCompletion(localStorage.token, {
 			model: model,
 			messages: messagesBody,
@@ -388,7 +348,8 @@
 				...($settings.options ?? {})
 			},
 			format: $settings.requestFormat ?? undefined,
-			keep_alive: $settings.keepAlive ?? undefined
+			keep_alive: $settings.keepAlive ?? undefined,
+			docs: docs.length > 0 ? docs : undefined
 		});
 
 		if (res && res.ok) {
@@ -548,6 +509,15 @@
 		const responseMessage = history.messages[responseMessageId];
 
 		scrollToBottom();
 
+		const docs = messages
+			.filter((message) => message?.files ?? null)
+			.map((message) =>
+				message.files.filter((item) => item.type === 'doc' || item.type === 'collection')
+			)
+			.flat(1);
+
+		console.log(docs);
+
 		const res = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
@@ -596,7 +566,8 @@
 				top_p: $settings?.options?.top_p ?? undefined,
 				num_ctx: $settings?.options?.num_ctx ?? undefined,
 				frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
-				max_tokens: $settings?.options?.num_predict ?? undefined
+				max_tokens: $settings?.options?.num_predict ?? undefined,
+				docs: docs.length > 0 ? docs : undefined
 			},
 			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
 		);
@@ -710,6 +681,7 @@
 			await setChatTitle(_chatId, userPrompt);
 		}
 	};
 
 	const stopResponse = () => {
 		stopResponseFlag = true;
 		console.log('stopResponse');