OpenDAS / dynamo · Commit e0bb5bd3

Commit e0bb5bd3, authored Mar 14, 2025 by Tanmay Verma; committed by GitHub, Mar 14, 2025
feat: LLMAPI PoC with dynamo-run launcher (#114)
parent 76b79149
Showing 6 changed files with 212 additions and 5 deletions
examples/python_rs/llm/trtllm/llm_api_config.yaml         +0   −0
examples/python_rs/llm/trtllm/monolith/__init__.py        +0   −0
examples/python_rs/llm/trtllm/monolith/dynamo_engine.py   +78  −0
examples/python_rs/llm/trtllm/monolith/launch.py          +34  −0
examples/python_rs/llm/trtllm/monolith/worker.py          +78  −0
lib/llm/src/engines/python.rs                             +22  −5
examples/python_rs/llm/tensorrt_llm/llm_api_config.yaml → examples/python_rs/llm/trtllm/llm_api_config.yaml
File moved
examples/python_rs/llm/tensorrt_llm/monolith/__init__.py → examples/python_rs/llm/trtllm/monolith/__init__.py
File moved
examples/python_rs/llm/trtllm/monolith/dynamo_engine.py
0 → 100644 (new file)
```python
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
IMPORTANT:
- This is only supposed to be used by the dynamo-run launcher.
- It is part of the bring-your-own-engine Python feature in dynamo-run.
"""

import sys
from pathlib import Path

from tensorrt_llm.logger import logger
from tensorrt_llm.serve.openai_protocol import (
    ChatCompletionRequest,
    ChatCompletionStreamResponse,
)

from dynamo.runtime import dynamo_endpoint

# Add the project root to the Python path
project_root = str(Path(__file__).parents[1])  # Go up to trtllm directory
if project_root not in sys.path:
    sys.path.append(project_root)

from common.base_engine import (  # noqa: E402
    BaseTensorrtLLMEngine,
    TensorrtLLMEngineConfig,
)
from common.generators import chat_generator  # noqa: E402
from common.parser import parse_dynamo_run_args  # noqa: E402

logger.set_level("info")


class DynamoTRTLLMEngine(BaseTensorrtLLMEngine):
    """
    Request handler for the generate endpoint
    """

    def __init__(self, trt_llm_engine_config: TensorrtLLMEngineConfig):
        super().__init__(trt_llm_engine_config)


# Global variable to store the engine instance. This is initialized in the main function.
engine = None


def init_global_engine(args, engine_config):
    global engine
    logger.debug(f"Received args: {args}")
    logger.info(f"Initializing global engine with engine config: {engine_config}")
    trt_llm_engine_config = TensorrtLLMEngineConfig(
        engine_config=engine_config,
    )
    engine = DynamoTRTLLMEngine(trt_llm_engine_config)


@dynamo_endpoint(ChatCompletionRequest, ChatCompletionStreamResponse)
async def generate(request):
    async for response in chat_generator(engine, request):
        yield response


if __name__ == "__main__":
    args, engine_config = parse_dynamo_run_args()
    init_global_engine(args, engine_config)
```
examples/python_rs/llm/trtllm/monolith/launch.py
0 → 100644 (new file)
```python
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import sys
from pathlib import Path

import uvloop

# Add the project root to the Python path
project_root = str(Path(__file__).parents[1])  # Go up to trtllm directory
if project_root not in sys.path:
    sys.path.append(project_root)

from common.parser import parse_tensorrt_llm_args  # noqa: E402

from .worker import trtllm_worker  # noqa: E402

if __name__ == "__main__":
    uvloop.install()
    args, engine_config = parse_tensorrt_llm_args()
    asyncio.run(trtllm_worker(engine_config))
```
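One consequence of the relative import worth noting: `from .worker import trtllm_worker` only resolves when launch.py runs as a package module, so the launcher presumably has to be started with `-m` (assumed invocation; the commit itself does not show one):

```python
# Assumed invocation, from the trtllm example directory:
#
#   python -m monolith.launch <args accepted by parse_tensorrt_llm_args>
#
# Running `python monolith/launch.py` directly would fail with:
#
#   ImportError: attempted relative import with no known parent package
```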
examples/python_rs/llm/tensorrt_llm/monolith/worker.py → examples/python_rs/llm/trtllm/monolith/worker.py
```diff
@@ -13,17 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import asyncio
-import json
-import signal
-import uuid
-import uvloop
 from common.base_engine import BaseTensorrtLLMEngine, TensorrtLLMEngineConfig
-from common.parser import LLMAPIConfig, parse_tensorrt_llm_args
-from common.processor import merge_promises, parse_chat_message_content
-from tensorrt_llm.executor import CppExecutorError
+from common.generators import chat_generator, completion_generator
+from common.parser import LLMAPIConfig
 from tensorrt_llm.logger import logger
 from tensorrt_llm.serve.openai_protocol import (
     ChatCompletionRequest,
@@ -47,101 +41,17 @@ class TensorrtLLMEngine(BaseTensorrtLLMEngine):
     @dynamo_endpoint(ChatCompletionRequest, ChatCompletionStreamResponse)
     async def generate_chat(self, request):
-        if self._llm_engine is None:
-            raise RuntimeError("Engine not initialized")
-
-        logger.debug(f"Received chat request: {request}")
-        request_id = str(uuid.uuid4())
-
-        self._ongoing_request_count += 1
-        try:
-            conversation = []
-            for message in request.messages:
-                conversation.extend(parse_chat_message_content(message))
-            tool_dicts = (
-                None
-                if request.tools is None
-                else [tool.model_dump() for tool in request.tools]
-            )
-            prompt: str = self._tokenizer.apply_chat_template(
-                conversation=conversation,
-                tokenize=False,
-                add_generation_prompt=request.add_generation_prompt,
-                tools=tool_dicts,
-                documents=request.documents,
-                chat_template=request.chat_template,
-                **(request.chat_template_kwargs or {}),
-            )
-            sampling_params = request.to_sampling_params()
-
-            promise = self._llm_engine.generate_async(
-                prompt,
-                sampling_params,
-                streaming=request.stream,
-            )
-            # NOTE: somehow stream and non-stream is working with the same path
-            response_generator = self.chat_processor.stream_response(
-                request, request_id, conversation, promise
-            )
-            async for response in response_generator:
-                yield response
-            self._ongoing_request_count -= 1
-        except CppExecutorError:
-            # If internal executor error is raised, shutdown the server
-            signal.raise_signal(signal.SIGINT)
-        except Exception as e:
-            raise RuntimeError("Failed to generate: " + str(e))
+        async for response in chat_generator(self, request):
+            yield response

     @dynamo_endpoint(CompletionRequest, CompletionStreamResponse)
     async def generate_completion(self, request):
-        if self._llm_engine is None:
-            raise RuntimeError("Engine not initialized")
-
-        self._ongoing_request_count += 1
-        logger.debug(f"Received completion request: {request}")
-        if isinstance(request.prompt, str) or (
-            isinstance(request.prompt, list) and isinstance(request.prompt[0], int)
-        ):
-            prompts = [request.prompt]
-        else:
-            prompts = request.prompt
-
-        promises = []
-        sampling_params = request.to_sampling_params()
-        try:
-            for prompt in prompts:
-                promise = self._llm_engine.generate_async(
-                    prompt,
-                    sampling_params,
-                    streaming=request.stream,
-                )
-                promises.append(promise)
-
-            generator = merge_promises(promises)
-            num_choices = (
-                len(prompts) if request.n is None else len(prompts) * request.n
-            )
-            # NOTE: always send `stream: true` to the worker, and decide whether to aggregate or not before sending the response back to client.
-            response_generator = self.completions_processor.create_completion_generator(
-                request, generator, num_choices
-            )
-            async for response in response_generator:
-                yield json.loads(response)
-            self._ongoing_request_count -= 1
-        except CppExecutorError:
-            # If internal executor error is raised, shutdown the server
-            signal.raise_signal(signal.SIGINT)
-        except Exception as e:
-            raise RuntimeError("Failed to generate: " + str(e))
+        async for response in completion_generator(self, request):
+            yield response

 @dynamo_worker()
-async def worker(runtime: DistributedRuntime, engine_config: LLMAPIConfig):
+async def trtllm_worker(runtime: DistributedRuntime, engine_config: LLMAPIConfig):
     """
     Instantiate a `backend` component and serve the `generate` endpoint
     A `Component` can serve multiple endpoints
@@ -166,9 +76,3 @@ async def worker(runtime: DistributedRuntime, engine_config: LLMAPIConfig):
         completions_endpoint.serve_endpoint(engine.generate_completion),
         chat_completions_endpoint.serve_endpoint(engine.generate_chat),
     )
-
-
-if __name__ == "__main__":
-    uvloop.install()
-    args, engine_config = parse_tensorrt_llm_args()
-    asyncio.run(worker(engine_config))
```
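Note: the removed handler bodies are not gone. Per the new imports in the first hunk, they were hoisted into `common.generators` as `chat_generator` and `completion_generator` (that file is not shown on this page of the diff). A runnable toy illustrating the shape of the delegation, with hypothetical stand-in names; the point is that the class-based worker method and the module-level dynamo-run entry point now share one streaming implementation:

```python
# Toy demonstration of the refactor's shape (all names hypothetical): one
# shared async generator serves both endpoint styles, so the streaming logic
# lives in exactly one place.
import asyncio


async def chat_generator(engine, request):
    # Stands in for common.generators.chat_generator.
    for token in f"{engine.name} answering {request}".split():
        yield token
        await asyncio.sleep(0)  # yield control, as a real engine would


class Worker:
    name = "worker-engine"

    async def generate_chat(self, request):
        # worker.py style: the method delegates to the shared generator
        async for response in chat_generator(self, request):
            yield response


class Engine:
    name = "dynamo-run-engine"


engine = Engine()


async def generate(request):
    # dynamo_engine.py style: module-level function over a global engine
    async for response in chat_generator(engine, request):
        yield response


async def main():
    async for tok in Worker().generate_chat("hi"):
        print(tok)
    async for tok in generate("hi"):
        print(tok)


asyncio.run(main())
```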
lib/llm/src/engines/python.rs
```diff
@@ -41,15 +41,32 @@ use crate::types::openai::chat_completions::OpenAIChatCompletionsStreamingEngine
 /// Python snippet to import a file as a module
 const PY_IMPORT: &CStr = cr#"
-import importlib.util
+import runpy
 import sys
+import os
+import functools
+import types

-spec = importlib.util.spec_from_file_location("__main__", file_path)
-module = importlib.util.module_from_spec(spec)
+module_dir = os.path.dirname(file_path)
+if module_dir not in sys.path:
+    sys.path.insert(0, module_dir)

 sys.argv = sys_argv
-sys.modules["__main__"] = module
-spec.loader.exec_module(module)
+module_dict = runpy.run_path(file_path, run_name='__main__')
+
+# Create a module class with the generate function
+class Module:
+    def __init__(self, module_dict):
+        self.__dict__.update(module_dict)
+        self._generate_func = module_dict['generate']
+
+    async def generate(self, request):
+        async for response in self._generate_func(request):
+            yield response
+
+# Create module instance and store it in globals
+module = Module(module_dict)
+globals()['module'] = module
 "#;

 /// An engine that takes and returns strings, feeding them to a python written engine
```
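Note: replacing the importlib spec/exec_module approach with `runpy.run_path` is what makes the monolith example above work. `run_path` executes the target file top to bottom with `__name__` set to `"__main__"` (so a file like monolith/dynamo_engine.py initializes its global engine during load) and returns the module's globals as a plain dict, from which the snippet plucks `generate`. A self-contained sketch of that stdlib behavior (all file contents here are illustrative):

```python
# Demonstrates the runpy behavior the loader relies on: run_path executes the
# file with __name__ == "__main__" and returns its globals as a dict.
import os
import runpy
import tempfile
import textwrap

src = textwrap.dedent("""
    initialized = False

    async def generate(request):
        yield request

    if __name__ == "__main__":
        initialized = True
""")

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(src)
    path = f.name

module_dict = runpy.run_path(path, run_name="__main__")
assert module_dict["initialized"] is True  # the main block executed at load
assert callable(module_dict["generate"])   # engine entry point is retrievable
os.unlink(path)
```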