Unverified Commit fdc4e1e5 authored by fzyzcjy, committed by GitHub

Tiny move files to utils folder (#11166)

parent 04b86b3c
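
For orientation (not part of the diff below): this commit only relocates a helper module, moving get_tokenizer from sglang.srt.hf_transformers_utils to sglang.srt.utils.hf_transformers_utils, and updates every caller to the new path. A minimal sketch of what a caller looks like after the move; the model id is a placeholder chosen for illustration, not something taken from this commit:

    # Sketch only: the import path is the new one introduced by this commit;
    # the model id below is a hypothetical placeholder.
    from sglang.srt.utils.hf_transformers_utils import get_tokenizer

    # get_tokenizer accepts a Hugging Face model id or a local path,
    # as the tests in this diff do with get_tokenizer(model).
    tokenizer = get_tokenizer("meta-llama/Llama-3.1-8B-Instruct")
    print(tokenizer.encode("hello world"))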
@@ -30,8 +30,8 @@ from transformers import (
 )
 from sglang.srt.entrypoints.engine import Engine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import load_image
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_PORT_FOR_SRT_TEST_RUNNER, calculate_rouge_l

 DEFAULT_PROMPTS = [
......
@@ -551,7 +551,7 @@ def test_gen_min_new_tokens():
     We verify that the number of tokens in the answer is >= the min_tokens threshold.
     """
     import sglang as sgl
-    from sglang.srt.hf_transformers_utils import get_tokenizer
+    from sglang.srt.utils.hf_transformers_utils import get_tokenizer

     model_path = sgl.global_config.default_backend.endpoint.get_model_name()
     MIN_TOKENS, MAX_TOKENS = 64, 128
......
@@ -921,7 +921,7 @@ def run_score_benchmark(
     async def _run_benchmark():
         # Load tokenizer for generating test data
-        from sglang.srt.hf_transformers_utils import get_tokenizer
+        from sglang.srt.utils.hf_transformers_utils import get_tokenizer

         tokenizer = get_tokenizer(model)
......
 import argparse
 import code

-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer

 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
......
@@ -38,7 +38,7 @@ from transformers import (
     AutoProcessor,
 )
-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer

 @torch.no_grad()
......
@@ -13,8 +13,8 @@ import numpy as np
 import openai
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import TEST_RERANK_QUERY_DOCS
 from sglang.test.test_utils import (
     DEFAULT_SMALL_CROSS_ENCODER_MODEL_NAME_FOR_TEST,
......
@@ -16,8 +16,8 @@ import unittest
 import openai
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_ENABLE_THINKING_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
......
@@ -2,8 +2,8 @@ import re
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
......
@@ -8,8 +8,8 @@ import numpy as np
 import openai
 import torch

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
......
@@ -4,8 +4,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
......
@@ -12,8 +12,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
......
@@ -9,8 +9,8 @@ from concurrent.futures import ThreadPoolExecutor
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
......
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
......
@@ -19,8 +19,8 @@ from torch.distributed.fsdp.api import (
 from transformers import AutoModelForCausalLM

 from sglang.srt.entrypoints.verl_engine import VerlEngine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import is_port_available
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import (
     HFRunner,
     SRTRunner,
......
@@ -19,8 +19,8 @@ from torch.distributed.fsdp.api import (
 from transformers import AutoModelForCausalLM

 from sglang.srt.entrypoints.verl_engine import VerlEngine
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import is_port_available
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.runners import (
     HFRunner,
     SRTRunner,
......
@@ -4,7 +4,7 @@ import unittest
 import requests

-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
......
@@ -4,8 +4,8 @@ import requests
 import torch

 import sglang as sgl
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
......
@@ -2,8 +2,8 @@ import unittest
 import openai

-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.utils import kill_process_tree
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import (
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
......
@@ -13,13 +13,13 @@ import numpy as np
 import torch

 from sglang.srt.configs.model_config import ModelConfig
-from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.srt.managers.schedule_batch import Req, ScheduleBatch
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_executor.model_runner import ModelRunner
 from sglang.srt.sampling.sampling_params import SamplingParams
 from sglang.srt.server_args import PortArgs, ServerArgs
 from sglang.srt.speculative.spec_info import SpeculativeAlgorithm
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_SMALL_MODEL_NAME_FOR_TEST, CustomTestCase
......
@@ -15,7 +15,7 @@ from sglang.srt.function_call.mistral_detector import MistralDetector
 from sglang.srt.function_call.pythonic_detector import PythonicDetector
 from sglang.srt.function_call.qwen3_coder_detector import Qwen3CoderDetector
 from sglang.srt.function_call.qwen25_detector import Qwen25Detector
-from sglang.srt.hf_transformers_utils import get_tokenizer
+from sglang.srt.utils.hf_transformers_utils import get_tokenizer
 from sglang.test.test_utils import DEFAULT_SMALL_MODEL_NAME_FOR_TEST
......