Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ox696c
ktransformers
Commits
b1bff2a4
"src/libtorchaudio/sox/io.cpp" did not exist on "60a8e23d0171a809a08caf6cd4823ed7db4c50db"
Commit
b1bff2a4
authored
Feb 07, 2025
by
RodriMora
Browse files
Added a simple /models endpoint to work with frontends that don't allow bypassing the model check, such as Open WebUI
parent
de7e892f
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
11 additions
and
3 deletions
+11
-3
ktransformers/server/api/openai/endpoints/chat.py
ktransformers/server/api/openai/endpoints/chat.py
+11
-3
No files found.
ktransformers/server/api/openai/endpoints/chat.py
View file @
b1bff2a4
...
@@ -10,8 +10,16 @@ from ktransformers.server.backend.base import BackendInterfaceBase
...
@@ -10,8 +10,16 @@ from ktransformers.server.backend.base import BackendInterfaceBase
# FastAPI router instance that the OpenAI-compatible endpoint
# decorators below attach to.
router = APIRouter()
# Static model catalogue served by the /models endpoint below.  Per the
# commit intent, some frontends (e.g. Open WebUI) require /models to list
# at least one entry before they will use the server, so a single
# placeholder entry is enough.
# Fix: corrected the misspelled project name ("ktranformers-model" ->
# "ktransformers-model") in the user-facing model name.
models = [
    {"id": "0", "name": "ktransformers-model"},
]
@
router
.
post
(
'/chat/completions'
,
tags
=
[
'openai'
])
@router.get("/models", tags=["openai"])
async def list_models():
    """Return the static model catalogue.

    Lets OpenAI-style frontends discover at least one model id before
    starting a chat session; the payload is the module-level ``models``
    list, returned as-is.
    """
    return models
@
router
.
post
(
'/chat/completions'
,
tags
=
[
'openai'
])
async
def
chat_completion
(
request
:
Request
,
create
:
ChatCompletionCreate
):
async
def
chat_completion
(
request
:
Request
,
create
:
ChatCompletionCreate
):
id
=
str
(
uuid4
())
id
=
str
(
uuid4
())
...
@@ -23,12 +31,12 @@ async def chat_completion(request:Request,create:ChatCompletionCreate):
...
@@ -23,12 +31,12 @@ async def chat_completion(request:Request,create:ChatCompletionCreate):
if
create
.
stream
:
if
create
.
stream
:
async
def
inner
():
async
def
inner
():
chunk
=
ChatCompletionChunk
(
id
=
id
,
object
=
'chat.completion.chunk'
,
created
=
int
(
time
()))
chunk
=
ChatCompletionChunk
(
id
=
id
,
object
=
'chat.completion.chunk'
,
created
=
int
(
time
()))
async
for
token
in
interface
.
inference
(
input_message
,
id
):
async
for
token
in
interface
.
inference
(
input_message
,
id
):
chunk
.
set_token
(
token
)
chunk
.
set_token
(
token
)
yield
chunk
yield
chunk
return
chat_stream_response
(
request
,
inner
())
return
chat_stream_response
(
request
,
inner
())
else
:
else
:
comp
=
ChatCompletionObject
(
id
=
id
,
object
=
'chat.completion.chunk'
,
created
=
int
(
time
()))
comp
=
ChatCompletionObject
(
id
=
id
,
object
=
'chat.completion.chunk'
,
created
=
int
(
time
()))
async
for
token
in
interface
.
inference
(
input_message
,
id
):
async
for
token
in
interface
.
inference
(
input_message
,
id
):
comp
.
append_token
(
token
)
comp
.
append_token
(
token
)
return
comp
return
comp
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment