Commit fdc4e1e5 (Unverified)
Tiny move files to utils folder (#11166)
Authored Oct 03, 2025 by fzyzcjy; committed by GitHub on Oct 03, 2025
Parent: 04b86b3c
Changes: 66 changed files in the commit; this page shows 20 changed files with 20 additions and 20 deletions (+20 −20).
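Every hunk below is the same one-line change: get_tokenizer now lives in sglang.srt.utils.hf_transformers_utils instead of sglang.srt.hf_transformers_utils, and the import is re-sorted where needed. A minimal sketch of a caller after the rename (the model name here is only a placeholder, not taken from this commit):

# Old location, before this commit:
# from sglang.srt.hf_transformers_utils import get_tokenizer

# New location, after the module moved into the utils folder:
from sglang.srt.utils.hf_transformers_utils import get_tokenizer

# Placeholder model name, for illustration only.
tokenizer = get_tokenizer("Qwen/Qwen2.5-0.5B-Instruct")
print(tokenizer.encode("hello"))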
python/sglang/test/runners.py (+1 −1)
python/sglang/test/test_programs.py (+1 −1)
python/sglang/test/test_utils.py (+1 −1)
scripts/playground/load_tokenizer.py (+1 −1)
scripts/playground/reference_hf.py (+1 −1)
test/srt/openai_server/basic/test_openai_server.py (+1 −1)
test/srt/openai_server/features/test_enable_thinking.py (+1 −1)
test/srt/openai_server/features/test_openai_server_ebnf.py (+1 −1)
test/srt/openai_server/features/test_openai_server_hidden_states.py (+1 −1)
test/srt/openai_server/function_call/test_openai_function_calling.py (+1 −1)
test/srt/openai_server/function_call/test_tool_choice.py (+1 −1)
test/srt/openai_server/validation/test_large_max_new_tokens.py (+1 −1)
test/srt/openai_server/validation/test_openai_server_ignore_eos.py (+1 −1)
test/srt/rl/test_verl_engine_2_gpu.py (+1 −1)
test/srt/rl/test_verl_engine_4_gpu.py (+1 −1)
test/srt/test_bench_serving.py (+1 −1)
test/srt/test_eagle_infer_a.py (+1 −1)
test/srt/test_fim_completion.py (+1 −1)
test/srt/test_forward_split_prefill.py (+1 −1)
test/srt/test_function_call_parser.py (+1 −1)
python/sglang/test/runners.py
@@ -30,8 +30,8 @@ from transformers import (
 )
 from sglang.srt.entrypoints.engine import Engine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import load_image
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_PORT_FOR_SRT_TEST_RUNNER, calculate_rouge_l

 DEFAULT_PROMPTS = [
python/sglang/test/test_programs.py
@@ -551,7 +551,7 @@ def test_gen_min_new_tokens():
     We verify that the number of tokens in the answer is >= the min_tokens threshold.
     """
     import sglang as sgl
-    from sglang.srt.hf_transformers_utils import get_tokenizer
+    from sglang.srt.utils.hf_transformers_utils import get_tokenizer

     model_path = sgl.global_config.default_backend.endpoint.get_model_name()
     MIN_TOKENS, MAX_TOKENS = 64, 128
python/sglang/test/test_utils.py
@@ -921,7 +921,7 @@ def run_score_benchmark(
     async def _run_benchmark():
         # Load tokenizer for generating test data
-        from sglang.srt.hf_transformers_utils import get_tokenizer
+        from sglang.srt.utils.hf_transformers_utils import get_tokenizer

         tokenizer = get_tokenizer(model)
scripts/playground/load_tokenizer.py
 import argparse
 import code

-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer

 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
scripts/playground/reference_hf.py
@@ -38,7 +38,7 @@ from transformers import (
     AutoProcessor,
 )

-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer


 @torch.no_grad()
test/srt/openai_server/basic/test_openai_server.py
@@ -13,8 +13,8 @@ import numpy as np
 import openai
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import TEST_RERANK_QUERY_DOCS
 from sglang.test.test_utils import (
     DEFAULT_SMALL_CROSS_ENCODER_MODEL_NAME_FOR_TEST,
test/srt/openai_server/features/test_enable_thinking.py
@@ -16,8 +16,8 @@ import unittest
 import openai
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_ENABLE_THINKING_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
test/srt/openai_server/features/test_openai_server_ebnf.py
@@ -2,8 +2,8 @@ import re
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
test/srt/openai_server/features/test_openai_server_hidden_states.py
@@ -8,8 +8,8 @@ import numpy as np
 import openai
 import torch

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
test/srt/openai_server/function_call/test_openai_function_calling.py
@@ -4,8 +4,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
test/srt/openai_server/function_call/test_tool_choice.py
@@ -12,8 +12,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
test/srt/openai_server/validation/test_large_max_new_tokens.py
@@ -9,8 +9,8 @@ from concurrent.futures import ThreadPoolExecutor
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
test/srt/openai_server/validation/test_openai_server_ignore_eos.py
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
test/srt/rl/test_verl_engine_2_gpu.py
@@ -19,8 +19,8 @@ from torch.distributed.fsdp.api import (
 from transformers import AutoModelForCausalLM

 from sglang.srt.entrypoints.verl_engine import VerlEngine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import is_port_available
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import (
     HFRunner,
     SRTRunner,
test/srt/rl/test_verl_engine_4_gpu.py
@@ -19,8 +19,8 @@ from torch.distributed.fsdp.api import (
 from transformers import AutoModelForCausalLM

 from sglang.srt.entrypoints.verl_engine import VerlEngine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import is_port_available
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import (
     HFRunner,
     SRTRunner,
test/srt/test_bench_serving.py
@@ -4,7 +4,7 @@ import unittest
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
test/srt/test_eagle_infer_a.py
@@ -4,8 +4,8 @@ import requests
 import torch

 import sglang as sgl
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
test/srt/test_fim_completion.py
@@ -2,8 +2,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
test/srt/test_forward_split_prefill.py
@@ -13,13 +13,13 @@ import numpy as np
 import torch

 from sglang.srt.configs.model_config import ModelConfig
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.managers.schedule_batch import Req, ScheduleBatch
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_executor.model_runner import ModelRunner
 from sglang.srt.sampling.sampling_params import SamplingParams
 from sglang.srt.server_args import PortArgs, ServerArgs
 from sglang.srt.speculative.spec_info import SpeculativeAlgorithm
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_SMALL_MODEL_NAME_FOR_TEST, CustomTestCase
test/srt/test_function_call_parser.py
@@ -15,7 +15,7 @@ from sglang.srt.function_call.mistral_detector import MistralDetector
 from sglang.srt.function_call.pythonic_detector import PythonicDetector
 from sglang.srt.function_call.qwen3_coder_detector import Qwen3CoderDetector
 from sglang.srt.function_call.qwen25_detector import Qwen25Detector
-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_SMALL_MODEL_NAME_FOR_TEST