OpenDAS / dynamo · Commits · 5d89a0c8

Commit 5d89a0c8 (unverified), authored May 07, 2025 by ptarasiewiczNV, committed by GitHub on May 07, 2025

fix: Create default sampling params only once during initialization (#982)

Parent: af9ee90e
Changes: 2 changed files, with 5 additions and 2 deletions

    examples/llm/components/processor.py    +1 -0
    examples/llm/utils/chat_processor.py    +4 -2
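The change itself is small: the default sampling parameters derived from the model config are now built once, when the Processor is constructed, and reused for every request instead of being recreated inside the per-request path. A minimal sketch of that pattern follows; Processor here is a pared-down stand-in, and build_sampling_params is a hypothetical helper that condenses the request path from the second file below, not the real method name.

class Processor:
    def __init__(self, model_config):
        self.model_config = model_config
        # Built once at initialization (the fix in this commit) rather than
        # on every request.
        self.default_sampling_params = model_config.get_diff_sampling_param()

    def build_sampling_params(self, request, prompt_token_ids):
        # Leave room for the prompt within the model's context window.
        default_max_tokens = self.model_config.max_model_len - len(prompt_token_ids)
        # Reuse the cached defaults instead of calling
        # self.model_config.get_diff_sampling_param() here on every request.
        return request.to_sampling_params(
            default_max_tokens,
            self.model_config.logits_processor_pattern,
            self.default_sampling_params,
        )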
examples/llm/components/processor.py
@@ -63,6 +63,7 @@ class Processor(ProcessMixIn):
         class_name = self.__class__.__name__
         self.engine_args = parse_vllm_args(class_name, "")
         self.model_config = self.engine_args.create_model_config()
+        self.default_sampling_params = self.model_config.get_diff_sampling_param()
         self.tokenizer = self._create_tokenizer(self.engine_args)
         self.chat_processor = ChatProcessor(self.tokenizer, self.model_config)
         self.completions_processor = CompletionsProcessor(
examples/llm/utils/chat_processor.py
@@ -29,6 +29,7 @@ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
 from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
 from vllm.entrypoints.openai.serving_engine import RequestPrompt
 from vllm.inputs.data import TokensPrompt
+from vllm.sampling_params import SamplingParams
 from vllm.transformers_utils.tokenizer import AnyTokenizer
@@ -38,6 +39,7 @@ class ProcessMixInRequired(Protocol):
     chat_processor: "ChatProcessor | None"
     completions_processor: "CompletionsProcessor | None"
     model_config: ModelConfig
+    default_sampling_params: SamplingParams


 class ProcessMixIn(ProcessMixInRequired):
@@ -50,6 +52,7 @@ class ProcessMixIn(ProcessMixInRequired):
     chat_processor: "ChatProcessor | None"
     completions_processor: "CompletionsProcessor | None"
     model_config: ModelConfig
+    default_sampling_params: SamplingParams

     def __init__(self):
         pass
@@ -76,11 +79,10 @@ class ProcessMixIn(ProcessMixInRequired):
         default_max_tokens = self.model_config.max_model_len - len(
             preprocess_result.engine_prompt["prompt_token_ids"]
         )
-        default_sampling_params = self.model_config.get_diff_sampling_param()
         sampling_params = request.to_sampling_params(
             default_max_tokens,
             self.model_config.logits_processor_pattern,
-            default_sampling_params,
+            self.default_sampling_params,
         )
         return (
             request,
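With the attribute cached in __init__, the request path in ProcessMixIn no longer calls self.model_config.get_diff_sampling_param() on every request; it passes the stored self.default_sampling_params straight to request.to_sampling_params(...). The new SamplingParams import and the default_sampling_params: SamplingParams annotations on ProcessMixInRequired and ProcessMixIn keep the protocol and the mixin consistent with the attribute that Processor now sets during initialization.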