Unverified commit 6fa6f38e authored by mlmz, committed by GitHub

Feat: add support for thinking mode via chat_template_kwargs.enable_t… (#5551)


Co-authored-by: shuaills <shishuaiuoe@gmail.com>
Co-authored-by: Chayenne <zhaochen20@outlook.com>
Co-authored-by: Lianmin Zheng <lianminzheng@gmail.com>
Co-authored-by: Yineng Zhang <me@zhyncs.com>
parent 693723d1
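
The patch adds a per-request `chat_template_kwargs` dict to the OpenAI-compatible chat API and uses it to toggle Qwen3-style thinking mode. A minimal client-side sketch of the new knob, assuming an SGLang server on localhost and a Qwen3 checkpoint (base_url, api_key, and model are placeholders, not part of this patch):

```python
# Hypothetical request; base_url, api_key, and model are placeholders.
import openai

client = openai.Client(base_url="http://localhost:30000/v1", api_key="None")
response = client.chat.completions.create(
    model="Qwen/Qwen3-8B",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    # Forwarded verbatim into the chat template; False switches thinking mode off.
    extra_body={"chat_template_kwargs": {"enable_thinking": False}},
)
print(response.choices[0].message.content)
```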
@@ -1001,6 +1001,11 @@ def v1_chat_generate_request(
                 tokenize=True,
                 add_generation_prompt=True,
                 tools=tools,
+                **(
+                    request.chat_template_kwargs
+                    if request.chat_template_kwargs
+                    else {}
+                ),
             )
         except:
             # This except branch will be triggered when the chosen model
@@ -1012,6 +1017,11 @@ def v1_chat_generate_request(
                 tokenize=True,
                 add_generation_prompt=True,
                 tools=tools,
+                **(
+                    request.chat_template_kwargs
+                    if request.chat_template_kwargs
+                    else {}
+                ),
             )
 
         if assistant_prefix:
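
Both hunks splat the request's `chat_template_kwargs` into the tokenizer's `apply_chat_template` call, so any variable the model's Jinja chat template understands passes through unchanged. A sketch of the effect with a Hugging Face tokenizer whose template reads `enable_thinking` (the model id is an assumption):

```python
# Extra keyword arguments to apply_chat_template become Jinja template variables.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")  # assumed model id
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "hi"}],
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # read by the template to switch off the <think> block
)
print(prompt)
```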
@@ -1245,16 +1255,34 @@ def v1_chat_generate_response(
         tool_calls = None
         text = ret_item["text"]
 
+        enable_thinking = True
         if isinstance(request, list):
             tool_choice = request[idx].tool_choice
             tools = request[idx].tools
             separate_reasoning = request[idx].separate_reasoning
+            if (
+                request[idx].chat_template_kwargs
+                and request[idx].chat_template_kwargs.get("enable_thinking") is not None
+            ):
+                enable_thinking = request[idx].chat_template_kwargs.get(
+                    "enable_thinking", True
+                )
         else:
             tool_choice = request.tool_choice
             tools = request.tools
             separate_reasoning = request.separate_reasoning
+            if (
+                request.chat_template_kwargs
+                and request.chat_template_kwargs.get("enable_thinking") is not None
+            ):
+                enable_thinking = request.chat_template_kwargs.get(
+                    "enable_thinking", True
+                )
 
-        if reasoning_parser and separate_reasoning:
+        reasoning_text = None
+        if reasoning_parser and separate_reasoning and enable_thinking:
             try:
                 parser = ReasoningParser(
                     model_type=reasoning_parser, stream_reasoning=False
@@ -1266,8 +1294,6 @@ def v1_chat_generate_response(
                     HTTPStatus.BAD_REQUEST,
                     "Failed to parse reasoning related info to json format!",
                 )
-        else:
-            reasoning_text = None
 
         if tool_choice != "none" and tools:
             parser = FunctionCallParser(tools, tool_call_parser)
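
The resolution rule introduced here: `enable_thinking` defaults to True and is overridden only when the request sets it explicitly, and reasoning is split out only when a parser is configured, `separate_reasoning` is on, and thinking is enabled. A condensed sketch of that rule (the helper name is mine, not in the patch):

```python
from typing import Dict, Optional

def resolve_enable_thinking(chat_template_kwargs: Optional[Dict]) -> bool:
    """Default to True; only an explicit enable_thinking value overrides it."""
    if chat_template_kwargs and chat_template_kwargs.get("enable_thinking") is not None:
        return chat_template_kwargs.get("enable_thinking", True)
    return True

assert resolve_enable_thinking(None) is True
assert resolve_enable_thinking({}) is True
assert resolve_enable_thinking({"enable_thinking": False}) is False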
......
@@ -361,6 +361,7 @@ class ChatCompletionRequest(BaseModel):
     session_params: Optional[Dict] = None
     separate_reasoning: bool = True
     stream_reasoning: bool = True
+    chat_template_kwargs: Optional[Dict] = None
 
     # For PD disaggregation
     bootstrap_host: Optional[str] = None
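
On the protocol side the new field is just an optional dict, so arbitrary template variables validate without further schema changes. A trimmed Pydantic sketch (a stand-in class reduced to the nearby fields, not the real model):

```python
from typing import Dict, Optional
from pydantic import BaseModel

class ChatCompletionRequestSketch(BaseModel):  # trimmed stand-in, not the real class
    separate_reasoning: bool = True
    stream_reasoning: bool = True
    chat_template_kwargs: Optional[Dict] = None

req = ChatCompletionRequestSketch(chat_template_kwargs={"enable_thinking": True})
print(req.chat_template_kwargs)  # {'enable_thinking': True}
```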
......
@@ -117,6 +117,29 @@ class DeepSeekR1Detector(BaseReasoningFormatDetector):
     # https://github.com/sgl-project/sglang/pull/3202#discussion_r1950153599
 
 
+class Qwen3Detector(BaseReasoningFormatDetector):
+    """
+    Detector for Qwen3 model.
+    Assumes reasoning format:
+        (<think>)*(.*)</think>
+    Returns all the text before the </think> tag as `reasoning_text`
+    and the rest of the text as `normal_text`.
+
+    Args:
+        stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
+            If True, streams reasoning content as it arrives.
+    """
+
+    def __init__(self, stream_reasoning: bool = True):
+        # Qwen3 is assumed to be reasoning until the `</think>` token
+        super().__init__(
+            "<think>",
+            "</think>",
+            force_reasoning=True,
+            stream_reasoning=stream_reasoning,
+        )
+
+
 class ReasoningParser:
     """
     Parser that handles both streaming and non-streaming scenarios for extracting
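
Because the detector is constructed with `force_reasoning=True`, output counts as reasoning from the first character, which matters for Qwen3 since the opening `<think>` tag may come from the chat template rather than the model. Assuming `detect_and_parse` is the base class's non-streaming entry point (an inference on my part, it is not shown in this diff), the split would look like:

```python
# Assumed non-streaming behavior; detect_and_parse and its result fields are
# inferred from the base class, which this diff does not show.
detector = Qwen3Detector(stream_reasoning=False)
result = detector.detect_and_parse("First add 2 and 2.</think>The answer is 4.")
# Everything before </think> is reasoning even with no opening <think> tag:
# result.reasoning_text -> "First add 2 and 2."
# result.normal_text   -> "The answer is 4."
```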
@@ -129,7 +152,8 @@ class ReasoningParser:
     """
 
     DetectorMap: Dict[str, BaseReasoningFormatDetector] = {
-        "deepseek-r1": DeepSeekR1Detector
+        "deepseek-r1": DeepSeekR1Detector,
+        "qwen3": Qwen3Detector,
     }
 
     def __init__(self, model_type: str = None, stream_reasoning: bool = True):
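
With the registry entry in place, picking a parser is a string lookup, and supporting another reasoning model means one new detector class plus one map entry. A usage sketch mirroring the server's non-streaming call site above; `parse_non_stream` is assumed from that path and is not shown in this diff:

```python
# Lookup through the new "qwen3" map entry; parse_non_stream is an assumed
# non-streaming API, mirroring the ReasoningParser(...) call site above.
parser = ReasoningParser(model_type="qwen3", stream_reasoning=False)
reasoning_text, normal_text = parser.parse_non_stream(
    "<think>Compute 2 + 2 step by step.</think>It equals 4."
)
print(reasoning_text)  # "Compute 2 + 2 step by step."
print(normal_text)     # "It equals 4."
```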
......