Unverified Commit 14c18d25 authored by Yudi Xue's avatar Yudi Xue Committed by GitHub
Browse files

Frontend language separate reasoning support (#6031)

parent 90bd3e32
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Launch A Server\n",
"\n",
"Launch the server with a reasoning model (Qwen3-4B) and its reasoning parser."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/workspaces/sglang/.venv/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:53:32] server_args=ServerArgs(model_path='Qwen/Qwen3-4B', tokenizer_path='Qwen/Qwen3-4B', tokenizer_mode='auto', skip_tokenizer_init=False, enable_tokenizer_batch_encode=False, load_format='auto', trust_remote_code=False, dtype='auto', kv_cache_dtype='auto', quantization=None, quantization_param_path=None, context_length=None, device='cuda', served_model_name='Qwen/Qwen3-4B', chat_template=None, completion_template=None, is_embedding=False, revision=None, host='0.0.0.0', port=38475, mem_fraction_static=0.88, max_running_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='fcfs', schedule_conservativeness=1.0, cpu_offload_gb=0, page_size=1, tp_size=1, pp_size=1, max_micro_batch_size=None, stream_interval=1, stream_output=False, random_seed=376691526, constrained_json_whitespace_pattern=None, watchdog_timeout=300, dist_timeout=None, download_dir=None, base_gpu_id=0, gpu_id_step=1, log_level='info', log_level_http=None, log_requests=False, log_requests_level=0, show_time_cost=False, enable_metrics=False, decode_log_interval=40, api_key=None, file_storage_path='sglang_storage', enable_cache_report=False, reasoning_parser='qwen3', dp_size=1, load_balance_method='round_robin', ep_size=1, dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', lora_paths=None, max_loras_per_batch=8, lora_backend='triton', attention_backend=None, sampling_backend='flashinfer', grammar_backend='xgrammar', speculative_algorithm=None, speculative_draft_model_path=None, speculative_num_steps=None, speculative_eagle_topk=None, speculative_num_draft_tokens=None, speculative_accept_threshold_single=1.0, speculative_accept_threshold_acc=1.0, speculative_token_map=None, enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, disable_radix_cache=False, disable_cuda_graph=False, 
disable_cuda_graph_padding=False, enable_nccl_nvls=False, disable_outlines_disk_cache=False, disable_custom_all_reduce=False, enable_multimodal=None, disable_overlap_schedule=False, enable_mixed_chunk=False, enable_dp_attention=False, enable_ep_moe=False, enable_deepep_moe=False, deepep_mode='auto', enable_torch_compile=False, torch_compile_max_bs=32, cuda_graph_max_bs=None, cuda_graph_bs=None, torchao_config='', enable_nan_detection=False, enable_p2p_check=False, triton_attention_reduce_in_fp32=False, triton_attention_num_kv_splits=8, num_continuous_decode_steps=1, delete_ckpt_after_loading=False, enable_memory_saver=False, allow_auto_truncate=False, enable_custom_logit_processor=False, tool_call_parser=None, enable_hierarchical_cache=False, hicache_ratio=2.0, hicache_size=0, hicache_write_policy='write_through_selective', flashinfer_mla_disable_ragged=False, warmups=None, moe_dense_tp_size=None, n_share_experts_fusion=0, disable_chunked_prefix_cache=False, disable_fast_image_processor=False, debug_tensor_dump_output_folder=None, debug_tensor_dump_input_file=None, debug_tensor_dump_inject=False, disaggregation_mode='null', disaggregation_bootstrap_port=8998, disaggregation_transfer_backend='mooncake', disaggregation_ib_device=None)\n",
"[2025-05-05 17:53:38] Attention backend not set. Use flashinfer backend by default.\n",
"[2025-05-05 17:53:38] Init torch distributed begin.\n",
"[2025-05-05 17:53:38] Init torch distributed ends. mem usage=0.00 GB\n",
"[2025-05-05 17:53:38] Load weight begin. avail mem=43.89 GB\n",
"[2025-05-05 17:53:39] Using model weights format ['*.safetensors']\n",
"Loading safetensors checkpoint shards: 0% Completed | 0/3 [00:00<?, ?it/s]\n",
"Loading safetensors checkpoint shards: 67% Completed | 2/3 [00:00<00:00, 4.06it/s]\n",
"Loading safetensors checkpoint shards: 100% Completed | 3/3 [00:01<00:00, 2.52it/s]\n",
"Loading safetensors checkpoint shards: 100% Completed | 3/3 [00:01<00:00, 2.73it/s]\n",
"\n",
"[2025-05-05 17:53:40] Load weight end. type=Qwen3ForCausalLM, dtype=torch.bfloat16, avail mem=36.25 GB, mem usage=7.63 GB.\n",
"[2025-05-05 17:53:40] KV Cache is allocated. #tokens: 225647, K size: 15.49 GB, V size: 15.49 GB\n",
"[2025-05-05 17:53:40] Memory pool end. avail mem=4.71 GB\n",
"2025-05-05 17:53:41,152 - INFO - flashinfer.jit: Prebuilt kernels not found, using JIT backend\n",
"[2025-05-05 17:53:41] Capture cuda graph begin. This can take up to several minutes. avail mem=4.09 GB\n",
"[2025-05-05 17:53:41] Capture cuda graph bs [1, 2, 4, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160]\n",
"Capturing batches (avail_mem=4.06 GB): 0%| | 0/23 [00:00<?, ?it/s]2025-05-05 17:53:41,620 - INFO - flashinfer.jit: Loading JIT ops: batch_decode_with_kv_cache_dtype_q_bf16_dtype_kv_bf16_dtype_o_bf16_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False\n",
"2025-05-05 17:53:41,642 - INFO - flashinfer.jit: Finished loading JIT ops: batch_decode_with_kv_cache_dtype_q_bf16_dtype_kv_bf16_dtype_o_bf16_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False\n",
"Capturing batches (avail_mem=2.68 GB): 100%|██████████| 23/23 [00:06<00:00, 3.75it/s]\n",
"[2025-05-05 17:53:47] Capture cuda graph end. Time elapsed: 6.18 s. mem usage=1.41 GB. avail mem=2.67 GB.\n",
"[2025-05-05 17:53:47] max_total_num_tokens=225647, chunked_prefill_size=8192, max_prefill_tokens=16384, max_running_requests=2821, context_len=40960\n",
"[2025-05-05 17:53:48] INFO: Started server process [1104179]\n",
"[2025-05-05 17:53:48] INFO: Waiting for application startup.\n",
"[2025-05-05 17:53:48] INFO: Application startup complete.\n",
"[2025-05-05 17:53:48] INFO: Uvicorn running on http://0.0.0.0:38475 (Press CTRL+C to quit)\n",
"[2025-05-05 17:53:48] INFO: 127.0.0.1:37502 - \"GET /v1/models HTTP/1.1\" 200 OK\n",
"[2025-05-05 17:53:49] INFO: 127.0.0.1:37516 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2025-05-05 17:53:49] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"2025-05-05 17:53:49,777 - INFO - flashinfer.jit: Loading JIT ops: batch_prefill_with_kv_cache_dtype_q_bf16_dtype_kv_bf16_dtype_o_bf16_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False\n",
"2025-05-05 17:53:49,799 - INFO - flashinfer.jit: Finished loading JIT ops: batch_prefill_with_kv_cache_dtype_q_bf16_dtype_kv_bf16_dtype_o_bf16_dtype_idx_i32_head_dim_qk_128_head_dim_vo_128_posenc_0_use_swa_False_use_logits_cap_False_f16qk_False\n",
"[2025-05-05 17:53:50] INFO: 127.0.0.1:37526 - \"POST /generate HTTP/1.1\" 200 OK\n",
"[2025-05-05 17:53:50] The server is fired up and ready to roll!\n",
"\n",
"\n",
" NOTE: Typically, the server runs in a separate terminal.\n",
" In this notebook, we run the server and notebook code together, so their outputs are combined.\n",
" To improve clarity, the server logs are displayed in the original black color, while the notebook outputs are highlighted in blue.\n",
" We are running those notebooks in a CI parallel environment, so the throughput is not representative of the actual performance.\n",
" \n",
"Server started on http://localhost:38475\n"
]
}
],
"source": [
"from sglang import separate_reasoning, assistant_begin, assistant_end\n",
"from sglang import assistant, function, gen, system, user\n",
"from sglang import image\n",
"from sglang import RuntimeEndpoint, set_default_backend\n",
"from sglang.srt.utils import load_image\n",
"from sglang.test.test_utils import is_in_ci\n",
"from sglang.utils import print_highlight, terminate_process, wait_for_server\n",
"\n",
"\n",
"if is_in_ci():\n",
" from patch import launch_server_cmd\n",
"else:\n",
" from sglang.utils import launch_server_cmd\n",
"\n",
"\n",
"server_process, port = launch_server_cmd(\n",
" \"python3 -m sglang.launch_server --model-path Qwen/Qwen3-4B --reasoning-parser qwen3 --host 0.0.0.0\"\n",
")\n",
"\n",
"wait_for_server(f\"http://localhost:{port}\")\n",
"print(f\"Server started on http://localhost:{port}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Set the default backend. Note: you can set chat_template_name in RuntimeEndpoint."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:53:53] INFO: 127.0.0.1:37530 - \"GET /get_model_info HTTP/1.1\" 200 OK\n"
]
}
],
"source": [
"set_default_backend(\n",
" RuntimeEndpoint(f\"http://localhost:{port}\", chat_template_name=\"qwen\")\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's start with a basic question-answering task and see how the reasoning content is generated."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:53:53] Prefill batch. #new-seq: 1, #new-token: 31, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:53:54] Decode batch. #running-req: 1, #token: 64, token usage: 0.00, gen throughput (token/s): 6.00, #queue-req: 0\n",
"[2025-05-05 17:53:54] Decode batch. #running-req: 1, #token: 104, token usage: 0.00, gen throughput (token/s): 82.06, #queue-req: 0\n",
"[2025-05-05 17:53:55] Decode batch. #running-req: 1, #token: 144, token usage: 0.00, gen throughput (token/s): 81.56, #queue-req: 0\n",
"[2025-05-05 17:53:55] Decode batch. #running-req: 1, #token: 184, token usage: 0.00, gen throughput (token/s): 81.14, #queue-req: 0\n",
"[2025-05-05 17:53:56] Decode batch. #running-req: 1, #token: 224, token usage: 0.00, gen throughput (token/s): 80.91, #queue-req: 0\n",
"[2025-05-05 17:53:56] Decode batch. #running-req: 1, #token: 264, token usage: 0.00, gen throughput (token/s): 80.55, #queue-req: 0\n",
"[2025-05-05 17:53:56] INFO: 127.0.0.1:37538 - \"POST /generate HTTP/1.1\" 200 OK\n",
"<think>\n",
"Okay, the user is asking for three countries and their capitals. Let me think about which countries to choose. I should pick some well-known ones to make it easy for the user.\n",
"\n",
"First, France is a good start because its capital is Paris, which is a major city. Then maybe Germany with Berlin. Those are both in Europe and have clear capitals. \n",
"\n",
"Next, I need a country from another continent. Let's go with Japan, which has Tokyo as its capital. That covers Asia. \n",
"\n",
"Wait, should I check if there are any countries with non-obvious capitals? Maybe not necessary. The user probably wants straightforward answers. \n",
"\n",
"Let me confirm the capitals again. France - Paris, Germany - Berlin, Japan - Tokyo. Yep, that's correct. \n",
"\n",
"I should present them in a clear list. Maybe number them and list each with the capital. Keep it simple and to the point. No need for extra info unless the user asks. \n",
"\n",
"Alright, that should cover it. Three countries, their capitals, correct and easy to understand.\n",
"</think>\n",
"\n",
"1. **France** - Paris \n",
"2. **Germany** - Berlin \n",
"3. **Japan** - Tokyo\n"
]
}
],
"source": [
"@function\n",
"def basic_qa(s, question):\n",
" s += system(f\"You are a helpful assistant than can answer questions.\")\n",
" s += user(question)\n",
" s += assistant_begin()\n",
" s += gen(\"answer\", max_tokens=512)\n",
" s += assistant_end()\n",
"\n",
"\n",
"state = basic_qa(\"List 3 countries and their capitals.\")\n",
"print_highlight(state[\"answer\"])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"With `separate_reasoning`, you can move the reasoning content to `{param_name}_reasoning_content` in the state."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"dict_keys(['answer', 'answer_reasoning_content'])\n",
"[2025-05-05 17:56:44] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 30, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:56:44] Decode batch. #running-req: 1, #token: 63, token usage: 0.00, gen throughput (token/s): 3.77, #queue-req: 0\n",
"[2025-05-05 17:56:45] Decode batch. #running-req: 1, #token: 103, token usage: 0.00, gen throughput (token/s): 82.12, #queue-req: 0\n",
"[2025-05-05 17:56:45] Decode batch. #running-req: 1, #token: 143, token usage: 0.00, gen throughput (token/s): 81.60, #queue-req: 0\n",
"[2025-05-05 17:56:46] Decode batch. #running-req: 1, #token: 183, token usage: 0.00, gen throughput (token/s): 81.17, #queue-req: 0\n",
"[2025-05-05 17:56:46] Decode batch. #running-req: 1, #token: 223, token usage: 0.00, gen throughput (token/s): 80.90, #queue-req: 0\n",
"[2025-05-05 17:56:46] INFO: 127.0.0.1:45282 - \"POST /generate HTTP/1.1\" 200 OK\n",
"\n",
"Separated Reasoning Content:\n",
"Okay, the user is asking for three countries and their capitals. Let me think. I need to make sure the countries are correct and their capitals are properly matched.\n",
"\n",
"First, I should start with a well-known country. France is a good example. Its capital is Paris. That's straightforward. Next, maybe a country in Asia. Japan's capital is Tokyo. That's correct. Then, perhaps a country in Africa. Egypt's capital is Cairo. Wait, is that right? Yes, Egypt's capital is indeed Cairo. Let me double-check. France - Paris, Japan - Tokyo, Egypt - Cairo. Those are all correct. I should present them in a clear list format. Make sure the country names are spelled correctly and the capitals are properly capitalized. No need for any extra information, just the three pairs. That should answer the user's question effectively.\n",
"\n",
"\n",
"\n",
"Content:\n",
"1. **France** - Paris \n",
"2. **Japan** - Tokyo \n",
"3. **Egypt** - Cairo\n",
"\n",
"\n",
"Messages:\n",
"{'role': 'assistant', 'content': '1. **France** - Paris \\n2. **Japan** - Tokyo \\n3. **Egypt** - Cairo'}\n"
]
}
],
"source": [
"@function\n",
"def basic_qa_separate_reasoning(s, question):\n",
" s += system(f\"You are a helpful assistant than can answer questions.\")\n",
" s += user(question)\n",
" s += assistant_begin()\n",
" s += separate_reasoning(gen(\"answer\", max_tokens=512), model_type=\"qwen3\")\n",
" s += assistant_end()\n",
"\n",
"\n",
"reasoning_state = basic_qa_separate_reasoning(\"List 3 countries and their capitals.\")\n",
"print_highlight(reasoning_state.stream_executor.variable_event.keys())\n",
"print_highlight(\n",
" f\"\\nSeparated Reasoning Content:\\n{reasoning_state['answer_reasoning_content']}\"\n",
")\n",
"\n",
"print_highlight(f\"\\n\\nContent:\\n{reasoning_state['answer']}\")\n",
"print_highlight(f\"\\n\\nMessages:\\n{reasoning_state.messages()[-1]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`separate_reasoning` can also be used in multi-turn conversations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:54:03] Decode batch. #running-req: 1, #token: 0, token usage: 0.00, gen throughput (token/s): 79.25, #queue-req: 0\n",
"[2025-05-05 17:54:03] Prefill batch. #new-seq: 1, #new-token: 18, #cached-token: 18, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:54:03] Decode batch. #running-req: 1, #token: 77, token usage: 0.00, gen throughput (token/s): 75.90, #queue-req: 0\n",
"[2025-05-05 17:54:04] Decode batch. #running-req: 1, #token: 117, token usage: 0.00, gen throughput (token/s): 81.85, #queue-req: 0\n",
"[2025-05-05 17:54:04] Decode batch. #running-req: 1, #token: 157, token usage: 0.00, gen throughput (token/s): 81.36, #queue-req: 0\n",
"[2025-05-05 17:54:05] Decode batch. #running-req: 1, #token: 197, token usage: 0.00, gen throughput (token/s): 81.01, #queue-req: 0\n",
"[2025-05-05 17:54:05] Decode batch. #running-req: 1, #token: 237, token usage: 0.00, gen throughput (token/s): 80.80, #queue-req: 0\n",
"[2025-05-05 17:54:06] Decode batch. #running-req: 1, #token: 277, token usage: 0.00, gen throughput (token/s): 80.43, #queue-req: 0\n",
"[2025-05-05 17:54:06] Decode batch. #running-req: 1, #token: 317, token usage: 0.00, gen throughput (token/s): 80.10, #queue-req: 0\n",
"[2025-05-05 17:54:07] Decode batch. #running-req: 1, #token: 357, token usage: 0.00, gen throughput (token/s): 79.83, #queue-req: 0\n",
"[2025-05-05 17:54:07] INFO: 127.0.0.1:41424 - \"POST /generate HTTP/1.1\" 200 OK\n",
"\n",
"\n",
"first_answer:\n",
"Here’s a list of three countries and their capitals:\n",
"\n",
"1. **France** – **Paris** \n",
"2. **United States** – **Washington, D.C.** \n",
"3. **Brazil** – **Brasília** \n",
"\n",
"Let me know if you'd like more examples! 😊\n",
"\n",
"\n",
"first_answer_reasoning_content:\n",
"Okay, the user is asking for a list of three countries and their capitals. Let me think about which countries to choose. They might be a student studying geography or someone just curious. I should pick well-known countries to make it easier for them.\n",
"\n",
"First, I'll start with the most obvious ones. France and its capital Paris are a classic example. Then, maybe the United States with Washington, D.C. That's another common one. For the third country, perhaps Brazil with Brasília? Wait, I should make sure I'm correct about the capitals. Let me double-check: France is Paris, USA is Washington, D.C., and Brazil is indeed Brasília. \n",
"\n",
"Alternatively, maybe including a country from a different continent could be better? Like Japan with Tokyo? But the user didn't specify any particular region. Since the first two are from Europe and North America, adding a South American country might be a good mix. \n",
"\n",
"Wait, but the user just asked for three, so as long as they're accurate, it's fine. I'll go with France, USA, and Brazil. Let me make sure I get the spelling right. Paris, Washington D.C., Brasília. Yeah, that's correct. I should present them in a clear list format. The user might need this for a school assignment or a quiz. Alright, that should cover it.\n",
"\n",
"[2025-05-05 17:54:07] Prefill batch. #new-seq: 1, #new-token: 83, #cached-token: 36, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:54:07] Decode batch. #running-req: 1, #token: 138, token usage: 0.00, gen throughput (token/s): 76.16, #queue-req: 0\n",
"[2025-05-05 17:54:08] Decode batch. #running-req: 1, #token: 178, token usage: 0.00, gen throughput (token/s): 81.10, #queue-req: 0\n",
"[2025-05-05 17:54:08] Decode batch. #running-req: 1, #token: 218, token usage: 0.00, gen throughput (token/s): 80.91, #queue-req: 0\n",
"[2025-05-05 17:54:09] Decode batch. #running-req: 1, #token: 258, token usage: 0.00, gen throughput (token/s): 80.63, #queue-req: 0\n",
"[2025-05-05 17:54:09] Decode batch. #running-req: 1, #token: 298, token usage: 0.00, gen throughput (token/s): 80.29, #queue-req: 0\n",
"[2025-05-05 17:54:10] Decode batch. #running-req: 1, #token: 338, token usage: 0.00, gen throughput (token/s): 79.96, #queue-req: 0\n",
"[2025-05-05 17:54:10] INFO: 127.0.0.1:47266 - \"POST /generate HTTP/1.1\" 200 OK\n",
"\n",
"\n",
"second_answer:\n",
"Here’s another list of three countries and their capitals:\n",
"\n",
"1. **Nigeria** – **Lagos** \n",
"2. **Japan** – **Tokyo** \n",
"3. **Argentina** – **Buenos Aires** \n",
"\n",
"Let me know if you'd like more examples! 😊\n",
"\n",
"\n",
"second_answer_reasoning_content:\n",
"Okay, the user asked for another list of three countries and their capitals. Let me think about what they might need. They previously got France, the US, and Brazil. Maybe they want more variety or different regions? I should pick countries from different continents to cover a broad range.\n",
"\n",
"First, maybe include a country from Africa. Lagos is the capital of Nigeria, which is a common example. Then, Asia – maybe Japan, with Tokyo. That's a major country. Then, a country from South America, like Argentina with Buenos Aires. That gives a good mix. I should check if those capitals are correct. Lagos is right for Nigeria, Tokyo for Japan, and Buenos Aires for Argentina. Yeah, that works. I'll present them in a list format again, making sure to mention each country and its capital clearly. Make sure the response is friendly and offers further help if needed.\n",
"\n"
]
}
],
"source": [
"@function\n",
"def multi_turn_qa(s):\n",
" s += system(f\"You are a helpful assistant than can answer questions.\")\n",
" s += user(\"Please give me a list of 3 countries and their capitals.\")\n",
" s += assistant(\n",
" separate_reasoning(gen(\"first_answer\", max_tokens=512), model_type=\"qwen3\")\n",
" )\n",
" s += user(\"Please give me another list of 3 countries and their capitals.\")\n",
" s += assistant(\n",
" separate_reasoning(gen(\"second_answer\", max_tokens=512), model_type=\"qwen3\")\n",
" )\n",
" return s\n",
"\n",
"\n",
"reasoning_state = multi_turn_qa()\n",
"print_highlight(f\"\\n\\nfirst_answer:\\n{reasoning_state['first_answer']}\")\n",
"print_highlight(\n",
" f\"\\n\\nfirst_answer_reasoning_content:\\n{reasoning_state['first_answer_reasoning_content']}\"\n",
")\n",
"print_highlight(f\"\\n\\nsecond_answer:\\n{reasoning_state['second_answer']}\")\n",
"print_highlight(\n",
" f\"\\n\\nsecond_answer_reasoning_content:\\n{reasoning_state['second_answer_reasoning_content']}\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using no-thinking mode as a Qwen 3 advanced feature \n",
"\n",
"sglang's `separate_reasoning` is particularly useful when combined with Qwen 3's advanced features.\n",
"\n",
"[Qwen 3's advanced usages](https://qwenlm.github.io/blog/qwen3/#advanced-usages)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:54:10] Prefill batch. #new-seq: 1, #new-token: 9, #cached-token: 26, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:54:10] Decode batch. #running-req: 1, #token: 51, token usage: 0.00, gen throughput (token/s): 76.50, #queue-req: 0\n",
"[2025-05-05 17:54:10] INFO: 127.0.0.1:47276 - \"POST /generate HTTP/1.1\" 200 OK\n",
"Reasoning Content:\n",
" \n",
"Content:\n",
" 1. France - Paris \n",
"2. Germany - Berlin \n",
"3. Japan - Tokyo\n"
]
}
],
"source": [
"reasoning_state = basic_qa_separate_reasoning(\n",
" \"List 3 countries and their capitals. /no_think\"\n",
")\n",
"print_highlight(f\"Reasoning Content:\\n{reasoning_state['answer_reasoning_content']}\")\n",
"print_highlight(f\"Content:\\n{reasoning_state['answer']}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`separate_reasoning` can also be used with regular-expression-constrained generation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@function\n",
"def regular_expression_gen(s):\n",
" s += user(\n",
" \"What is the IP address of the Google DNS servers? just provide the answer\"\n",
" )\n",
" s += assistant(\n",
" separate_reasoning(\n",
" gen(\n",
" \"answer\",\n",
" temperature=0,\n",
" regex=r\"((25[0-5]|2[0-4]\\d|[01]?\\d\\d?).){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\",\n",
" max_tokens=512,\n",
" ),\n",
" model_type=\"qwen3\",\n",
" ),\n",
" )\n",
"\n",
"\n",
"reasoning_state = regular_expression_gen()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2025-05-05 17:54:11] Prefill batch. #new-seq: 1, #new-token: 26, #cached-token: 8, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2025-05-05 17:54:11] Decode batch. #running-req: 1, #token: 68, token usage: 0.00, gen throughput (token/s): 47.33, #queue-req: 0\n",
"[2025-05-05 17:54:12] Decode batch. #running-req: 1, #token: 108, token usage: 0.00, gen throughput (token/s): 83.03, #queue-req: 0\n",
"[2025-05-05 17:54:12] Decode batch. #running-req: 1, #token: 148, token usage: 0.00, gen throughput (token/s): 82.51, #queue-req: 0\n",
"[2025-05-05 17:54:13] Decode batch. #running-req: 1, #token: 188, token usage: 0.00, gen throughput (token/s): 82.06, #queue-req: 0\n",
"[2025-05-05 17:54:13] Decode batch. #running-req: 1, #token: 228, token usage: 0.00, gen throughput (token/s): 81.80, #queue-req: 0\n",
"[2025-05-05 17:54:14] Decode batch. #running-req: 1, #token: 268, token usage: 0.00, gen throughput (token/s): 81.48, #queue-req: 0\n",
"[2025-05-05 17:54:14] Decode batch. #running-req: 1, #token: 308, token usage: 0.00, gen throughput (token/s): 81.14, #queue-req: 0\n",
"[2025-05-05 17:54:15] Decode batch. #running-req: 1, #token: 348, token usage: 0.00, gen throughput (token/s): 80.84, #queue-req: 0\n",
"[2025-05-05 17:54:15] INFO: 127.0.0.1:47290 - \"POST /generate HTTP/1.1\" 200 OK\n",
"Answer:\n",
"2023-10-05\n",
"\n",
"\n",
"Reasoning Content:\n",
"Okay, the user is asking for the IP addresses of Google's DNS servers. Let me recall what I know about DNS servers. Google provides two public DNS servers, right? They're commonly used for their reliability and speed.\n",
"\n",
"I think the primary one is 8.8.8.8. Wait, isn't there another one? Oh yeah, 8.8.4.4. Those are the two main ones. Let me make sure I'm not mixing them up with other providers. For example, Cloudflare uses 1.1.1.1 and 1.0.0.1. But Google's are definitely 8.8.8.8 and 8.8.4.4. \n",
"\n",
"I should check if there are any other IP addresses, but I don't think so. They have two main ones. The user might be looking to set up their DNS settings, so providing both is important. Also, maybe mention that they're both in the same range, which is 8.8.0.0/14. But the user just asked for the IP addresses, so maybe just list them. \n",
"\n",
"Wait, the user said \"just provide the answer,\" so maybe they don't need extra info. But to be thorough, I should confirm that those are the correct ones. Let me think if there's any chance of confusion. No, 8.8.8.8 is the primary, and 8.8.4.4 is the secondary. Yeah, that's right. So the answer is those two IPs.\n",
"\n"
]
}
],
"source": [
"print_highlight(f\"Answer:\\n{reasoning_state['answer']}\")\n",
"print_highlight(\n",
" f\"\\n\\nReasoning Content:\\n{reasoning_state['answer_reasoning_content']}\"\n",
")"
]
}
],
"metadata": {
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
......@@ -68,6 +68,7 @@ The core features include:
:caption: Frontend Tutorial
frontend/frontend.ipynb
frontend/frontend_reasoning.ipynb
frontend/choices_methods.md
.. toctree::
......
......@@ -15,6 +15,7 @@ from sglang.api import (
get_server_info,
image,
select,
separate_reasoning,
set_default_backend,
system,
system_begin,
......@@ -54,6 +55,7 @@ __all__ = [
"get_server_info",
"image",
"select",
"separate_reasoning",
"set_default_backend",
"system",
"system_begin",
......
......@@ -15,6 +15,7 @@ from sglang.lang.ir import (
SglRoleBegin,
SglRoleEnd,
SglSelect,
SglSeparateReasoning,
SglVideo,
)
......@@ -277,3 +278,9 @@ def assistant_begin():
def assistant_end():
return SglRoleEnd("assistant")
def separate_reasoning(
    expr: Optional[SglExpr] = None, model_type: Optional[str] = None
):
    """Wrap ``expr`` so the reasoning part of its output is split out.

    Returns an ``SglExprList`` of ``[expr, SglSeparateReasoning(...)]``.
    At execution time the interpreter stores the reasoning content under
    ``{expr_name}_reasoning_content`` and keeps only the normal content
    in the original variable (see
    ``StreamExecutor._execute_separate_reasoning``).

    Args:
        expr: The gen/select expression (or expression list) to wrap.
            NOTE(review): with the default ``None`` the returned list
            contains ``None`` as its first element; all visible callers
            pass an expression — confirm whether the default is intended.
        model_type: Reasoning-parser type, e.g. ``"deepseek-r1"`` or
            ``"qwen3"``; validated later by ``ReasoningParser``.
    """
    return SglExprList([expr, SglSeparateReasoning(model_type, expr=expr)])
......@@ -26,6 +26,7 @@ from sglang.lang.ir import (
SglRoleBegin,
SglRoleEnd,
SglSelect,
SglSeparateReasoning,
SglVariable,
SglVarScopeBegin,
SglVarScopeEnd,
......@@ -472,6 +473,8 @@ class StreamExecutor:
self._execute_concatenate_and_append_kv_cache(other)
else:
self._execute_concatenate_and_append_text(other)
elif isinstance(other, SglSeparateReasoning):
self._execute_separate_reasoning(other)
else:
raise ValueError(f"Unknown type: {type(other)}")
......@@ -724,8 +727,44 @@ class StreamExecutor:
src_rids = [state.stream_executor.sid for state in expr.states]
self.backend.concatenate_and_append(src_rids, self.sid)
def _execute_separate_reasoning(self, expr: SglSeparateReasoning):
    """Split a generated answer into reasoning and normal content.

    Parses the text of the wrapped gen/select variable with
    ``ReasoningParser(expr.model_type)``, stores the reasoning part under
    ``{name}_reasoning_content``, and replaces both the original variable
    and the current role's portion of ``self.text_`` with the
    non-reasoning part.
    """
    if self.stream:
        # separate reasoning for stream is not supported
        return

    if (
        self.cur_role == "assistant"
        and self.num_api_spec_tokens is not None
        and self.backend.is_chat_model
    ):
        # Execute the stored lazy generation calls
        self.backend.role_end_generate(self)

    # NOTE(review): imported here rather than at module top — presumably to
    # avoid pulling srt runtime dependencies into the frontend; confirm.
    from sglang.srt.reasoning_parser import ReasoningParser

    reasoning_parser = ReasoningParser(expr.model_type)
    other = expr.expr
    if not other:
        # Nothing was wrapped: no-op.
        return
    elif isinstance(other, SglGen) or isinstance(other, SglSelect):
        cur_text = self.get_var(other.name)
        reasoning, normal_text = reasoning_parser.parse_non_stream(cur_text)
        reasoning_name = expr.process_name_for_reasoning(other.name)
        # Overwrite the answer variable with the de-reasoned text and
        # expose the reasoning under its own derived variable name.
        self.set_var(other.name, normal_text)
        self.set_var(reasoning_name, reasoning)
        # the variable is ready to be used
        self.variable_event[reasoning_name].set()
        # Rewrite the role text so the conversation history (used by
        # later turns) no longer contains the reasoning content.
        self.text_ = self.text_[: self.cur_role_begin_pos] + normal_text
    elif isinstance(other, SglExprList):
        # Recurse into each sub-expression of a list.
        for x in other.expr_list:
            self._execute_separate_reasoning(
                SglSeparateReasoning(expr.model_type, x)
            )
def _init_var_event(self, expr):
if isinstance(expr, (SglGen, SglSelect, SglVarScopeBegin)):
if isinstance(
expr, (SglGen, SglSelect, SglVarScopeBegin, SglSeparateReasoning)
):
self.variable_event[expr.name] = threading.Event()
if self.stream:
self.stream_var_event[expr.name] = threading.Event()
......
......@@ -606,3 +606,30 @@ class SglCommitLazy(SglExpr):
def __repr__(self):
return "CommitLazy()"
class SglSeparateReasoning(SglExpr):
    """Marker expression that splits reasoning content out of a generation.

    Wraps another expression (a gen/select, or a list containing them) and
    records the variable name under which the reasoning text will be
    stored: ``{wrapped_name}_reasoning_content``.
    """

    def __init__(self, model_type: str, expr: SglExpr):
        super().__init__()
        self.model_type = model_type
        self.expr = expr
        # Derived reasoning-variable name; stays None when the wrapped
        # expression carries no name (e.g. an empty expression list).
        self.name = None
        self._process_expr(expr)

    def process_name_for_reasoning(self, name):
        """Derive the reasoning-content variable name from ``name``."""
        if not name:
            raise ValueError("name must be provided")
        return f"{name}_reasoning_content"

    def _process_expr(self, expr):
        """Record the reasoning variable name for the wrapped expression."""
        if isinstance(expr, (SglGen, SglSelect)):
            self.name = self.process_name_for_reasoning(expr.name)
        elif isinstance(expr, SglExprList):
            for sub_expr in expr.expr_list:
                self._process_expr(sub_expr)

    def __repr__(self):
        return f"SeparateReasoning(model_type={self.model_type}, name={self.name})"
......@@ -8,6 +8,8 @@ suites = {
TestFile("test_srt_backend.py"),
# Skip this due to some OPENAI_API_KEY issues
# "test_openai_backend.py",
TestFile("test_separate_reasoning.py"),
TestFile("test_separate_reasoning_execution.py"),
],
}
......
"""
Tests for the separate_reasoning functionality in sglang.
Usage:
python3 -m unittest test/lang/test_separate_reasoning.py
"""
import unittest
from sglang import assistant, gen, separate_reasoning, user
from sglang.lang.ir import SglExprList, SglSeparateReasoning
from sglang.test.test_utils import CustomTestCase
class TestSeparateReasoning(CustomTestCase):
    """Unit tests for the `separate_reasoning` frontend primitive.

    These tests exercise only expression construction (no server/backend
    required): ``separate_reasoning(expr, model_type=...)`` should wrap
    ``expr`` into an SglExprList of [expr, SglSeparateReasoning(...)].
    """

    def test_separate_reasoning_creation(self):
        """Test that SglSeparateReasoning objects are created correctly."""
        # Test with valid model type and gen expression
        test_gen = gen("test")
        expr = separate_reasoning(test_gen, model_type="deepseek-r1")
        self.assertIsInstance(expr, SglExprList)
        self.assertEqual(len(expr.expr_list), 2)
        # The original gen expression is kept as the first list element.
        self.assertEqual(expr.expr_list[0], test_gen)
        reasoning_expr = expr.expr_list[1]
        self.assertIsInstance(reasoning_expr, SglSeparateReasoning)
        self.assertEqual(reasoning_expr.model_type, "deepseek-r1")
        self.assertEqual(reasoning_expr.name, "test_reasoning_content")
        # Test with another valid model type
        expr = separate_reasoning(test_gen, model_type="qwen3")
        self.assertIsInstance(expr, SglExprList)
        self.assertEqual(expr.expr_list[1].model_type, "qwen3")

    def test_separate_reasoning_name_processing(self):
        """Test that separate_reasoning correctly processes names."""
        test_gen = gen("test_var")
        expr = separate_reasoning(test_gen, model_type="deepseek-r1")
        reasoning_expr = expr.expr_list[1]
        # The reasoning variable name is derived from the gen name.
        self.assertEqual(reasoning_expr.name, "test_var_reasoning_content")
        # Test the process_name_for_reasoning method
        self.assertEqual(
            reasoning_expr.process_name_for_reasoning("another_var"),
            "another_var_reasoning_content",
        )

    def test_separate_reasoning_repr(self):
        """Test the string representation of SglSeparateReasoning."""
        test_gen = gen("test_var")
        expr = separate_reasoning(test_gen, model_type="deepseek-r1")
        reasoning_expr = expr.expr_list[1]
        self.assertEqual(
            repr(reasoning_expr),
            "SeparateReasoning(model_type=deepseek-r1, name=test_var_reasoning_content)",
        )

    def test_separate_reasoning_with_invalid_model_type(self):
        """Test that separate_reasoning accepts any model type during creation."""
        # Create with invalid model type
        test_gen = gen("test")
        expr = separate_reasoning(test_gen, model_type="invalid-model")
        self.assertIsInstance(expr, SglExprList)
        self.assertEqual(expr.expr_list[1].model_type, "invalid-model")
        # The actual validation happens in the ReasoningParser constructor
# Allow running this module directly: python3 -m unittest test/lang/test_separate_reasoning.py
if __name__ == "__main__":
    unittest.main()
"""
Tests for the execution of separate_reasoning functionality in sglang.
Usage:
python3 -m unittest test/lang/test_separate_reasoning_execution.py
"""
import threading
import time
import unittest
from unittest.mock import MagicMock, patch
from sglang import assistant, gen, separate_reasoning, user
from sglang.lang.interpreter import StreamExecutor
from sglang.lang.ir import SglGen, SglSeparateReasoning
from sglang.test.test_utils import CustomTestCase
# Event factory used by the tests below. NOTE(review): despite the "daemon"
# name, this returns a plain threading.Event with no special non-blocking
# behavior — the tests rely on tearDown setting every tracked event so that
# waiting threads are released before exit.
def create_daemon_event():
    """Return a fresh threading.Event (tracked by the tests for cleanup)."""
    return threading.Event()
class MockReasoningParser:
    """Test double for a reasoning parser.

    Splits any input text into a (reasoning, normal_text) pair by tagging it
    with the model type, and records which parse method was invoked.
    """

    def __init__(self, model_type):
        self.model_type = model_type
        # Call-tracking flags inspected by the tests.
        self.parse_non_stream_called = False
        self.parse_stream_chunk_called = False

    def _tag(self, text):
        # Shared formatting for both parse paths: prefix the text to make the
        # "reasoning" and "normal" halves distinguishable in assertions.
        return (
            f"[REASONING from {self.model_type}]: {text}",
            f"[NORMAL from {self.model_type}]: {text}",
        )

    def parse_non_stream(self, full_text):
        """Simulate non-streaming parsing of a complete generation."""
        self.parse_non_stream_called = True
        return self._tag(full_text)

    def parse_stream_chunk(self, chunk_text):
        """Simulate streaming parsing of a single chunk."""
        self.parse_stream_chunk_called = True
        return self._tag(chunk_text)
class TestSeparateReasoningExecution(CustomTestCase):
    """Execution-path tests for separate_reasoning with a mocked ReasoningParser."""

    def setUp(self):
        """Track every threading.Event created so tearDown can release waiters."""
        super().setUp()
        self.events = []

    # BUG FIX: this class previously defined tearDown TWICE; the second
    # definition silently shadowed the first. They are merged into one.
    def tearDown(self):
        """Set all tracked events so any waiting threads are released."""
        super().tearDown()
        for event in self.events:
            event.set()

    @patch("sglang.srt.reasoning_parser.ReasoningParser")
    def test_execute_separate_reasoning(self, mock_parser_class):
        """_execute_separate_reasoning splits generated text via the parser."""
        mock_parser = MockReasoningParser("deepseek-r1")
        mock_parser_class.return_value = mock_parser
        # Mock backend avoids AttributeError in StreamExecutor.__del__.
        mock_backend = MagicMock()
        executor = StreamExecutor(
            backend=mock_backend,
            arguments={},
            default_sampling_para={},
            chat_template={
                "role_map": {"user": "user", "assistant": "assistant"}
            },  # Simple chat template
            stream=False,
            use_thread=False,
        )
        # Seed the executor with a finished generation variable.
        var_name = "test_var"
        reasoning_name = f"{var_name}_reasoning_content"
        var_value = "Test content"
        executor.variables = {var_name: var_value}
        # Create events and track them for cleanup in tearDown.
        var_event = create_daemon_event()
        reasoning_event = create_daemon_event()
        self.events.extend([var_event, reasoning_event])
        executor.variable_event = {var_name: var_event, reasoning_name: reasoning_event}
        executor.variable_event[var_name].set()  # Mark generation as complete
        # Set up the current role.
        executor.cur_role = "assistant"
        executor.cur_role_begin_pos = 0
        executor.text_ = var_value
        # Build the gen expression and its separate_reasoning companion.
        gen_expr = SglGen(var_name)
        expr = SglSeparateReasoning("deepseek-r1", expr=gen_expr)
        executor._execute_separate_reasoning(expr)
        # Parser constructed exactly once with the requested model type.
        mock_parser_class.assert_called_once_with("deepseek-r1")
        self.assertTrue(mock_parser.parse_non_stream_called)
        # Reasoning content lands in its own variable...
        self.assertIn(reasoning_name, executor.variables)
        self.assertEqual(
            executor.variables[reasoning_name],
            f"[REASONING from deepseek-r1]: {var_value}",
        )
        # ...while the original variable keeps only the normal text.
        self.assertEqual(
            executor.variables[var_name], f"[NORMAL from deepseek-r1]: {var_value}"
        )
        # The reasoning variable's event must be set so readers unblock.
        self.assertIn(reasoning_name, executor.variable_event)
        self.assertTrue(executor.variable_event[reasoning_name].is_set())
        # The accumulated text is rewritten to the normal portion only.
        self.assertEqual(executor.text_, f"[NORMAL from deepseek-r1]: {var_value}")

    @patch("sglang.srt.reasoning_parser.ReasoningParser")
    def test_reasoning_parser_integration(self, mock_parser_class):
        """Different model types resolve to different parser instances."""
        deepseek_parser = MockReasoningParser("deepseek-r1")
        qwen_parser = MockReasoningParser("qwen3")

        # Return a model-type-specific parser, mirroring the real factory.
        def get_parser(model_type):
            if model_type == "deepseek-r1":
                return deepseek_parser
            elif model_type == "qwen3":
                return qwen_parser
            else:
                raise ValueError(f"Unsupported model type: {model_type}")

        mock_parser_class.side_effect = get_parser
        # Test with DeepSeek-R1 model.
        test_text = "This is a test"
        reasoning, normal_text = deepseek_parser.parse_non_stream(test_text)
        self.assertEqual(reasoning, f"[REASONING from deepseek-r1]: {test_text}")
        self.assertEqual(normal_text, f"[NORMAL from deepseek-r1]: {test_text}")
        # Test with Qwen3 model.
        reasoning, normal_text = qwen_parser.parse_non_stream(test_text)
        self.assertEqual(reasoning, f"[REASONING from qwen3]: {test_text}")
        self.assertEqual(normal_text, f"[NORMAL from qwen3]: {test_text}")

    @patch("sglang.srt.reasoning_parser.ReasoningParser")
    def test_reasoning_parser_invalid_model(self, mock_parser_class):
        """Unknown or missing model types raise ValueError."""

        # Raise for anything outside the supported set, as the real parser does.
        def get_parser(model_type):
            if model_type in ["deepseek-r1", "qwen3"]:
                return MockReasoningParser(model_type)
            elif model_type is None:
                raise ValueError("Model type must be specified")
            else:
                raise ValueError(f"Unsupported model type: {model_type}")

        mock_parser_class.side_effect = get_parser
        with self.assertRaises(ValueError) as context:
            mock_parser_class("invalid-model")
        self.assertIn("Unsupported model type", str(context.exception))
        with self.assertRaises(ValueError) as context:
            mock_parser_class(None)
        self.assertIn("Model type must be specified", str(context.exception))
# Allow running this module directly: python3 -m unittest test/lang/test_separate_reasoning_execution.py
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment