Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dynamo
Commits
6eb31507
Commit
6eb31507
authored
Apr 07, 2025
by
ishandhanani
Committed by
GitHub
Apr 07, 2025
Browse files
fix: mypy error (#543)
Co-authored-by: finofliu <finofliu@tencent.com>
parent
99cc11e6
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
17 additions
and
5 deletions
+17
-5
examples/tensorrt_llm/common/chat_processor.py
examples/tensorrt_llm/common/chat_processor.py
+17
-5
No files found.
examples/tensorrt_llm/common/chat_processor.py
View file @
6eb31507
...
@@ -41,7 +41,8 @@ from tensorrt_llm.serve.openai_protocol import (
     ToolCall,
     UsageInfo,
 )
-from transformers import AutoTokenizer
+from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

 logger.set_level("debug")
...
@@ -71,7 +72,11 @@ def parse_chat_message_content(
...
@@ -71,7 +72,11 @@ def parse_chat_message_content(
class
BaseChatProcessor
:
class
BaseChatProcessor
:
def
__init__
(
self
,
model
:
str
,
tokenizer
:
AutoTokenizer
):
def
__init__
(
self
,
model
:
str
,
tokenizer
:
Union
[
PreTrainedTokenizer
,
PreTrainedTokenizerFast
],
):
self
.
model
=
model
self
.
model
=
model
self
.
tokenizer
=
tokenizer
self
.
tokenizer
=
tokenizer
...
@@ -122,7 +127,10 @@ class BaseChatProcessor:
...
@@ -122,7 +127,10 @@ class BaseChatProcessor:
class
ChatProcessor
(
BaseChatProcessor
):
class
ChatProcessor
(
BaseChatProcessor
):
def
__init__
(
def
__init__
(
self
,
model
:
str
,
tokenizer
:
AutoTokenizer
,
using_engine_generator
:
bool
=
False
self
,
model
:
str
,
tokenizer
:
Union
[
PreTrainedTokenizer
,
PreTrainedTokenizerFast
],
using_engine_generator
:
bool
=
False
,
):
):
super
().
__init__
(
model
,
tokenizer
)
super
().
__init__
(
model
,
tokenizer
)
self
.
using_engine_generator
=
using_engine_generator
self
.
using_engine_generator
=
using_engine_generator
...
@@ -269,7 +277,7 @@ class ChatProcessor(BaseChatProcessor):
...
@@ -269,7 +277,7 @@ class ChatProcessor(BaseChatProcessor):
if
request
.
tools
is
None
if
request
.
tools
is
None
else
[
tool
.
model_dump
()
for
tool
in
request
.
tools
]
else
[
tool
.
model_dump
()
for
tool
in
request
.
tools
]
)
)
prompt
:
str
=
self
.
tokenizer
.
apply_chat_template
(
prompt
=
self
.
tokenizer
.
apply_chat_template
(
conversation
=
conversation
,
conversation
=
conversation
,
tokenize
=
False
,
tokenize
=
False
,
add_generation_prompt
=
request
.
add_generation_prompt
,
add_generation_prompt
=
request
.
add_generation_prompt
,
...
@@ -329,7 +337,11 @@ class ChatProcessor(BaseChatProcessor):
...
@@ -329,7 +337,11 @@ class ChatProcessor(BaseChatProcessor):
class
CompletionsProcessor
:
class
CompletionsProcessor
:
def
__init__
(
self
,
model
:
str
,
tokenizer
:
AutoTokenizer
):
def
__init__
(
self
,
model
:
str
,
tokenizer
:
Union
[
PreTrainedTokenizer
,
PreTrainedTokenizerFast
],
):
self
.
model
=
model
self
.
model
=
model
self
.
tokenizer
=
tokenizer
self
.
tokenizer
=
tokenizer
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment