Unverified commit 38c00ed7, authored by Lianmin Zheng, committed by GitHub

Fix multimodal registry and code sync scripts (#10759)


Co-authored-by: cctry <shiyang@x.ai>
parent d4041a5e
@@ -12,8 +12,7 @@ logger = logging.getLogger(__name__)
 PROCESSOR_MAPPING = {}


-def import_processors():
-    package_name = "sglang.srt.multimodal.processors"
+def import_processors(package_name: str):
     package = importlib.import_module(package_name)
     for _, name, ispkg in pkgutil.iter_modules(package.__path__, package_name + "."):
         if not ispkg:
......
@@ -185,7 +185,7 @@ class TokenizerManager(TokenizerCommunicatorMixin):
         )

         if self.model_config.is_multimodal:
-            import_processors()
+            import_processors("sglang.srt.multimodal.processors")
             try:
                 _processor = get_processor(
                     server_args.tokenizer_path,
......
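For context, the two hunks above parameterize the pkgutil-based processor discovery instead of hardcoding the package name inside the function. A minimal runnable sketch of the same pattern; the `PROCESSOR_MAPPING` registry is illustrative, and the registration code inside each imported module is assumed, not shown in this diff:

```python
import importlib
import pkgutil

# Illustrative registry; each imported processor module is expected to
# register itself here as a side effect of being imported.
PROCESSOR_MAPPING = {}


def import_processors(package_name: str):
    """Import every non-package submodule of `package_name` so that
    module-level registration code in each processor file runs."""
    package = importlib.import_module(package_name)
    for _, name, ispkg in pkgutil.iter_modules(package.__path__, package_name + "."):
        if not ispkg:
            importlib.import_module(name)


# Passing the package name explicitly lets callers point the same
# discovery routine at any processor package, e.g.:
# import_processors("sglang.srt.multimodal.processors")
```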
@@ -66,8 +66,8 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_loader.weight_utils import default_weight_loader
 from sglang.srt.utils import add_prefix

-tp_size = get_tensor_model_parallel_world_size()
-tp_rank = get_tensor_model_parallel_rank()
+tp_size: Optional[int] = None
+tp_rank: Optional[int] = None


 def gate_up_proj_weight_loader(
@@ -341,6 +341,13 @@ class LlamaModel(nn.Module):
         quant_config: Optional[QuantizationConfig] = None,
     ) -> None:
         super().__init__()
+
+        global tp_size, tp_rank
+        if tp_size is None:
+            tp_size = get_tensor_model_parallel_world_size()
+        if tp_rank is None:
+            tp_rank = get_tensor_model_parallel_rank()
+
         self.config = config
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
......
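The two hunks above move the tensor-parallel size/rank lookups from import time into `LlamaModel.__init__`, so the module can be imported before the distributed environment is initialized. A generic, self-contained sketch of this lazy-global pattern; `_world_size` and `_rank` are stubs standing in for the real `get_tensor_model_parallel_*` calls:

```python
from typing import Optional

# Module-level cache: None means "not resolved yet". Resolving these at
# import time would fail before the process group exists.
tp_size: Optional[int] = None
tp_rank: Optional[int] = None


def _world_size() -> int:
    return 1  # stub for get_tensor_model_parallel_world_size()


def _rank() -> int:
    return 0  # stub for get_tensor_model_parallel_rank()


class Model:
    def __init__(self) -> None:
        # Resolve TP info on first construction instead of at import.
        global tp_size, tp_rank
        if tp_size is None:
            tp_size = _world_size()
        if tp_rank is None:
            tp_rank = _rank()
```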
@@ -8,6 +8,7 @@ import logging
 import os
 import random
 import socket
+import ssl
 import subprocess
 import sys
 import time
@@ -158,7 +159,15 @@ def http_request(
         data = bytes(dumps(json), encoding="utf-8")

     try:
-        resp = urllib.request.urlopen(req, data=data, cafile=verify)
+        if sys.version_info >= (3, 13):
+            # Python 3.13+: Use SSL context (cafile removed)
+            if verify and isinstance(verify, str):
+                context = ssl.create_default_context(cafile=verify)
+            else:
+                context = ssl.create_default_context()
+            resp = urllib.request.urlopen(req, data=data, context=context)
+        else:
+            resp = urllib.request.urlopen(req, data=data, cafile=verify)
        return HttpResponse(resp)
    except urllib.error.HTTPError as e:
        return HttpResponse(e)
......
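Background for the branch above: `urllib.request.urlopen` carried deprecated `cafile`/`capath`/`cadefault` parameters until they were removed in Python 3.13, so newer interpreters must pass an `ssl.SSLContext` via `context=` instead. A standalone version of the same version check; the `verify` convention (a CA-bundle path or a falsy value) follows the diff:

```python
import ssl
import sys
import urllib.request


def open_url(req, data=None, verify=None):
    """urlopen wrapper honoring an optional CA bundle path in `verify`."""
    if sys.version_info >= (3, 13):
        # 3.13 removed the cafile argument; build an SSLContext instead.
        if verify and isinstance(verify, str):
            context = ssl.create_default_context(cafile=verify)
        else:
            context = ssl.create_default_context()
        return urllib.request.urlopen(req, data=data, context=context)
    # Pre-3.13 interpreters still accept cafile (deprecated since 3.6).
    return urllib.request.urlopen(req, data=data, cafile=verify)
```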
@@ -16,6 +16,7 @@ python3 -c 'import os, shutil, tempfile, getpass; cache_dir = os.environ.get("TO
 # Kill existing processes
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 bash "${SCRIPT_DIR}/../killall_sglang.sh"
+echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-}"

 # Install apt packages
 apt install -y git libnuma-dev
@@ -90,5 +91,3 @@ fi

 # Show current packages
 $PIP_CMD list
-
-echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-}"
@@ -43,13 +43,17 @@ folder_names = [
     "docker",
     "docs",
     "examples",
-    "sgl-kernel",
-    "README.md",
     "python/sglang/lang",
     "python/sglang/srt",
     "python/sglang/test",
     "python/sglang/__init__.py",
     "python/sglang/utils.py",
+    "python/sglang/README.md",
+    "sgl-kernel",
     "test/lang",
     "test/srt",
+    "test/README.md",
+    "README.md",
 ]

 private_repo = "your-org/sglang-private-repo"
......
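`folder_names` is the allow-list of repo-relative paths (directories and single files) that the sync scripts mirror between repositories; the reorder groups entries by location and adds the README files. The copy logic itself is outside this diff, so the following is only a hedged sketch of how such a list is typically consumed; `sync_paths`, `src_root`, and `dst_root` are hypothetical names:

```python
import shutil
from pathlib import Path


def sync_paths(folder_names, src_root: Path, dst_root: Path):
    """Mirror each allow-listed directory or file into dst_root."""
    for rel in folder_names:
        src, dst = src_root / rel, dst_root / rel
        if src.is_dir():
            shutil.copytree(src, dst, dirs_exist_ok=True)
        elif src.is_file():
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(src, dst)
```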
@@ -43,13 +43,17 @@ folder_names = [
     "docker",
     "docs",
     "examples",
-    "sgl-kernel",
-    "README.md",
     "python/sglang/lang",
     "python/sglang/srt",
     "python/sglang/test",
     "python/sglang/__init__.py",
     "python/sglang/utils.py",
+    "python/sglang/README.md",
+    "sgl-kernel",
     "test/lang",
     "test/srt",
+    "test/README.md",
+    "README.md",
 ]

 # --- Configuration End ---
@@ -395,9 +399,10 @@ def main():
     pr_title = f"[Auto Sync] Update {filename_list_str} ({current_date})"
     pr_body = (
         f"Sync changes from commit `{short_hash}`.\n\n"
-        f"**Relevant Files Changed:**\n{file_list_str}"
-        "\n\n---\n\n"
-        "*This is an automated PR created by a script.*"
+        f"**Files Changed:**\n{file_list_str}\n\n"
+        f"Author: {author_name} <{author_email}>"
+        f"\n\n---\n\n"
+        f"*This is an automated PR created by scripts/copy_from_oss.py.*"
     )

     # 5. Create branch, apply patch, and push
......
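The new `pr_body` joins the adjacent f-string literals inside the parentheses via implicit string concatenation and adds an author line; `author_name` and `author_email` are presumably extracted from the synced commit earlier in `main()`. A quick demonstration of how the pieces join, using placeholder values:

```python
# Placeholder values, for illustration only.
short_hash = "abc1234"
file_list_str = "- python/sglang/srt/utils.py"
author_name, author_email = "Jane Doe", "jane@example.com"

pr_body = (
    f"Sync changes from commit `{short_hash}`.\n\n"
    f"**Files Changed:**\n{file_list_str}\n\n"
    f"Author: {author_name} <{author_email}>"
    f"\n\n---\n\n"
    f"*This is an automated PR created by scripts/copy_from_oss.py.*"
)
print(pr_body)
```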