OpenDAS / ktransformers · Commit 6e4da83d

Unverified commit 6e4da83d, authored Apr 17, 2025 by wang jiahao, committed by GitHub on Apr 17, 2025.

Merge pull request #978 from cyhasuka/main

Feat: Support Non-streaming chat in Ollama backend

Parents: b0551323, 877aec85
Showing 1 changed file with 62 additions and 8 deletions.

ktransformers/server/api/ollama/completions.py (+62, -8)
@@ -49,7 +49,10 @@ class OllamaGenerationStreamResponse(BaseModel):
     done: bool = Field(...)
 
 class OllamaGenerationResponse(BaseModel):
-    pass
+    model: str
+    created_at: str
+    response: str
+    done: bool
 
 @router.post("/generate", tags=['ollama'])
 async def generate(request: Request, input: OllamaGenerateCompletionRequest):
@@ -81,8 +84,21 @@ async def generate(request: Request, input: OllamaGenerateCompletionRequest):
                 yield d.model_dump_json() + '\n'
         return check_link_response(request, inner())
     else:
-        raise NotImplementedError
+        complete_response = ""
+        async for res in interface.inference(input.prompt, id):
+            if isinstance(res, RawUsage):
+                raw_usage = res
+            else:
+                token, finish_reason = res
+                complete_response += token
+        response = OllamaGenerationResponse(
+            model=config.model_name,
+            created_at=str(datetime.now()),
+            response=complete_response,
+            done=True
+        )
+        return response
 
 # https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
 class OllamaChatCompletionMessage(BaseModel):
     role: str
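For orientation only (not part of this commit): with the non-streaming branch above, the generate endpoint can return one consolidated OllamaGenerationResponse instead of an NDJSON stream. The sketch below is a hypothetical client call; the host, port, route prefix, and model name are assumptions, and the "stream" flag is assumed to follow the Ollama API that this router mirrors.

# Hypothetical client sketch for the new non-streaming generate path.
# Base URL, port, and model name are placeholders, not taken from this commit.
import requests

resp = requests.post(
    "http://localhost:10002/api/generate",   # assumed mount point, mirroring Ollama's layout
    json={
        "model": "DeepSeek-V3",               # placeholder model name
        "prompt": "Why is the sky blue?",
        "stream": False,                      # request the newly implemented non-streaming branch
    },
    timeout=600,
)
resp.raise_for_status()
body = resp.json()
# OllamaGenerationResponse fields: model, created_at, response, done
print(body["response"])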
@@ -106,10 +122,17 @@ class OllamaChatCompletionStreamResponse(BaseModel):
     eval_count: Optional[int] = Field(None, description="Number of tokens generated")
     eval_duration: Optional[int] = Field(None, description="Time spent generating response in nanoseconds")
 
 class OllamaChatCompletionResponse(BaseModel):
-    pass
+    model: str
+    created_at: str
+    message: dict
+    done: bool
+    total_duration: Optional[int] = Field(None, description="Total time spent in nanoseconds")
+    load_duration: Optional[int] = Field(None, description="Time spent loading model in nanoseconds")
+    prompt_eval_count: Optional[int] = Field(None, description="Number of tokens in prompt")
+    prompt_eval_duration: Optional[int] = Field(None, description="Time spent evaluating prompt in nanoseconds")
+    eval_count: Optional[int] = Field(None, description="Number of tokens generated")
+    eval_duration: Optional[int] = Field(None, description="Time spent generating response in nanoseconds")
 
 @router.post("/chat", tags=['ollama'])
 async def chat(request: Request, input: OllamaChatCompletionRequest):
@@ -164,8 +187,39 @@ async def chat(request: Request, input: OllamaChatCompletionRequest):
                 yield d.model_dump_json() + '\n'
         return check_link_response(request, inner())
     else:
-        raise NotImplementedError("Non-streaming chat is not implemented.")
+        start_time = time()
+        complete_response = ""
+        eval_count = 0
+        async for res in interface.inference(prompt, id):
+            if isinstance(res, RawUsage):
+                raw_usage = res
+            else:
+                token, finish_reason = res
+                complete_response += token
+                eval_count += 1
+        end_time = time()
+        total_duration = int((end_time - start_time) * 1_000_000_000)
+        prompt_eval_count = len(prompt.split())
+        eval_duration = total_duration
+        prompt_eval_duration = 0
+        load_duration = 0
+        response = OllamaChatCompletionResponse(
+            model=config.model_name,
+            created_at=str(datetime.now()),
+            message={"role": "assistant", "content": complete_response},
+            done=True,
+            total_duration=total_duration,
+            load_duration=load_duration,
+            prompt_eval_count=prompt_eval_count,
+            prompt_eval_duration=prompt_eval_duration,
+            eval_count=eval_count,
+            eval_duration=eval_duration
+        )
+        return response
 
 # https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
 class OllamaModel(BaseModel):
     name: str
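Similarly, a minimal sketch of how a client might exercise the new non-streaming chat branch; the response carries the Ollama-style accounting fields shown above (eval_count, and durations reported in nanoseconds). Host, port, route prefix, and model name are again assumptions rather than values from this commit.

# Hypothetical client sketch for the new non-streaming chat path.
# URL, port, and model name are placeholders, not taken from this commit.
import requests

resp = requests.post(
    "http://localhost:10002/api/chat",        # assumed mount point, mirroring Ollama's layout
    json={
        "model": "DeepSeek-V3",                # placeholder model name
        "messages": [{"role": "user", "content": "Summarize MoE offloading in one line."}],
        "stream": False,                       # take the newly implemented non-streaming branch
    },
    timeout=600,
)
resp.raise_for_status()
body = resp.json()
print(body["message"]["content"])              # assistant reply assembled server-side
print(body["eval_count"], "tokens in",
      body["total_duration"] / 1e9, "seconds")  # durations are in nanoseconds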
@@ -224,4 +278,4 @@ async def show(request: Request, input: OllamaShowRequest):
             quantization_level=" "
         ),
         model_info=OllamaModelInfo()
-    )
\ No newline at end of file
+    )