Commit 909abb58 authored by maxiao

adapt to sglang v0.5.2rc1 on dcu
# launch server
# python -m sglang.launch_server --model mistralai/Mistral-7B-Instruct-v0.3 --lora-paths /home/ying/test_lora lora1=/home/ying/test_lora_1 lora2=/home/ying/test_lora_2 --disable-radix --disable-cuda-graph --max-loras-per-batch 4
# send requests
# lora_path[i] specifies the LoRA used for text[i], so make sure they have the same length
# use None to specify a base-only prompt, e.g. "lora_path": [None, "/home/ying/test_lora"]
import json
import requests
url = "http://127.0.0.1:30000"
json_data = {
"text": [
"prompt 1",
"prompt 2",
"prompt 3",
"prompt 4",
"prompt 5",
"prompt 6",
"prompt 7",
],
"sampling_params": {"max_new_tokens": 32},
"lora_path": [
"/home/ying/test_lora",
"lora1",
"lora2",
"lora1",
"lora2",
None,
None,
],
}
response = requests.post(
url + "/generate",
json=json_data,
)
print(json.dumps(response.json()))
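# Illustrative follow-up sketch, assuming the batched response is a list with one
# {"text": ...} entry per prompt (matching the print above): pair each prompt with
# the adapter that served it.
for prompt, lora, result in zip(json_data["text"], json_data["lora_path"], response.json()):
    adapter = lora if lora is not None else "<base model>"
    print(f"[{adapter}] {prompt!r} -> {result['text']!r}")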
"""
Usage:
# Install the latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Install the latest sglang.
# Endpoint Service CLI:
python -m sglang.launch_server --model-path lmms-lab/llama3-llava-next-8b --port=30000
python3 llama3_llava_server.py
Output:
"Friends posing for a fun photo with a life-sized teddy bear, creating a playful and memorable moment."
"""
import argparse
import asyncio
import copy
import json
import aiohttp
import requests
from llava.conversation import conv_llava_llama_3
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_llava_llama_3)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
response = []
for i in range(1):
response.append(
send_request(
url + "/generate",
{
"text": prompt_with_template,
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|eot_id|>",
},
},
)
)
rets = await asyncio.gather(*response)
for ret in rets:
print(ret["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_llava_llama_3)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
pload = {
"text": prompt_with_template,
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|eot_id|>",
},
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"stream": True,
}
response = requests.post(
url + "/generate",
json=pload,
stream=True,
)
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
args = parser.parse_args()
asyncio.run(test_concurrent(args))
test_streaming(args)
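# Illustrative aside, assuming the server also accepts base64-encoded images for
# "image_data" (in addition to URLs); a local file could then be sent roughly like this:
#
#   import base64
#   with open("local_image.jpg", "rb") as f:
#       pload["image_data"] = base64.b64encode(f.read()).decode("utf-8")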
"""
Usage:
python3 -m sglang.launch_server --model-path lmms-lab/llava-onevision-qwen2-72b-ov --port=30000 --tp-size=8
python3 llava_onevision_server.py
"""
import base64
import io
import os
import sys
import time
import numpy as np
import openai
import requests
from decord import VideoReader, cpu
from PIL import Image
# pip install httpx==0.23.3
# pip install decord
# pip install protobuf==3.20.0
def download_video(url, cache_dir):
file_path = os.path.join(cache_dir, "jobs.mp4")
os.makedirs(cache_dir, exist_ok=True)
response = requests.get(url)
response.raise_for_status()
with open(file_path, "wb") as f:
f.write(response.content)
print(f"File downloaded and saved to: {file_path}")
return file_path
def create_openai_client(base_url):
return openai.Client(api_key="EMPTY", base_url=base_url)
def image_stream_request_test(client):
print("----------------------Image Stream Request Test----------------------")
stream_request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
},
{
"type": "text",
"text": "Please describe this image. Please list the benchmarks and the models.",
},
],
},
],
temperature=0.7,
max_tokens=1024,
stream=True,
)
stream_response = ""
for chunk in stream_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
stream_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def multi_image_stream_request_test(client):
print(
"----------------------Multi-Images Stream Request Test----------------------"
)
stream_request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
"modalities": "multi-images",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/test/lang/example_image.png"
},
"modalities": "multi-images",
},
{
"type": "text",
"text": "I have shown you two images. Please describe the two images to me.",
},
],
},
],
temperature=0.7,
max_tokens=1024,
stream=True,
)
stream_response = ""
for chunk in stream_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
stream_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def video_stream_request_test(client, video_path):
print("------------------------Video Stream Request Test----------------------")
messages = prepare_video_messages(video_path)
video_request = client.chat.completions.create(
model="default",
messages=messages,
temperature=0,
max_tokens=1024,
stream=True,
)
print("-" * 30)
video_response = ""
for chunk in video_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
video_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def image_speed_test(client):
print("----------------------Image Speed Test----------------------")
start_time = time.perf_counter()
request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
},
{
"type": "text",
"text": "Please describe this image. Please list the benchmarks and the models.",
},
],
},
],
temperature=0,
max_tokens=1024,
)
end_time = time.perf_counter()
response = request.choices[0].message.content
print(response)
print("-" * 30)
print_speed_test_results(request, start_time, end_time)
def video_speed_test(client, video_path):
print("------------------------Video Speed Test------------------------")
messages = prepare_video_messages(video_path)
start_time = time.perf_counter()
video_request = client.chat.completions.create(
model="default",
messages=messages,
temperature=0,
max_tokens=1024,
)
end_time = time.perf_counter()
video_response = video_request.choices[0].message.content
print(video_response)
print("-" * 30)
print_speed_test_results(video_request, start_time, end_time)
def prepare_video_messages(video_path):
max_frames_num = 32
vr = VideoReader(video_path, ctx=cpu(0))
total_frame_num = len(vr)
uniform_sampled_frames = np.linspace(
0, total_frame_num - 1, max_frames_num, dtype=int
)
frame_idx = uniform_sampled_frames.tolist()
frames = vr.get_batch(frame_idx).asnumpy()
base64_frames = []
for frame in frames:
pil_img = Image.fromarray(frame)
buff = io.BytesIO()
pil_img.save(buff, format="JPEG")
base64_str = base64.b64encode(buff.getvalue()).decode("utf-8")
base64_frames.append(base64_str)
messages = [{"role": "user", "content": []}]
for base64_frame in base64_frames:
frame_format = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_frame}"},
"modalities": "video",
}
messages[0]["content"].append(frame_format)
prompt = {"type": "text", "text": "Please describe the video in detail."}
messages[0]["content"].append(prompt)
return messages
def print_speed_test_results(request, start_time, end_time):
total_tokens = request.usage.total_tokens
completion_tokens = request.usage.completion_tokens
prompt_tokens = request.usage.prompt_tokens
print(f"Total tokens: {total_tokens}")
print(f"Completion tokens: {completion_tokens}")
print(f"Prompt tokens: {prompt_tokens}")
print(f"Time taken: {end_time - start_time} seconds")
print(f"Token per second: {total_tokens / (end_time - start_time)}")
print(f"Completion token per second: {completion_tokens / (end_time - start_time)}")
print(f"Prompt token per second: {prompt_tokens / (end_time - start_time)}")
def main():
url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
cache_dir = os.path.expanduser("~/.cache")
video_path = download_video(url, cache_dir)
client = create_openai_client("http://127.0.0.1:30000/v1")
image_stream_request_test(client)
multi_image_stream_request_test(client)
video_stream_request_test(client, video_path)
image_speed_test(client)
video_speed_test(client, video_path)
if __name__ == "__main__":
main()
"""
Usage:
# Run a Pixtral model with SGLang:
# HuggingFace:
python -m sglang.launch_server --model-path mistral-community/pixtral-12b --port=30000
# ModelScope:
python -m sglang.launch_server --model-path AI-ModelScope/pixtral-12b --port=30000
# Then test it with:
python pixtral_server.py
This script tests the Pixtral model with both single and multiple images.
"""
import argparse
import asyncio
import json
import aiohttp
import requests
IMAGE_TOKEN_SEP = "\n[IMG]"
ROUTE = "/generate"
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}{ROUTE}"
# Single image test
if args.single_image:
prompt = f"<s>[INST]Describe this image in detail.{IMAGE_TOKEN_SEP}[/INST]"
image_url = "https://picsum.photos/id/237/400/300"
modality = ["image"]
# Multiple images test
else:
image_urls = [
"https://picsum.photos/id/237/400/300",
"https://picsum.photos/id/27/500/500",
]
prompt = f"<s>[INST]How many photos are there? Describe each in a very short sentence.{IMAGE_TOKEN_SEP * len(image_urls)}[/INST]"
image_url = image_urls
modality = ["multi-images"]
response = await send_request(
url,
{
"text": prompt,
"image_data": image_url,
"sampling_params": {
"max_new_tokens": 100,
"temperature": 0.7,
"top_p": 0.9,
},
"modalities": modality,
},
)
print(f"Response: {response}")
if "text" in response:
print("\nOutput text:", response["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}/generate"
# Single image test
if args.single_image:
prompt = f"<s>[INST]Describe this image in detail.{IMAGE_TOKEN_SEP}[/INST]"
image_data = "https://picsum.photos/id/237/400/300"
modality = ["image"]
# Multiple images test
else:
image_urls = [
"https://picsum.photos/id/237/400/300",
"https://picsum.photos/id/27/500/500",
]
prompt = f"<s>[INST]How many photos are there? Describe each in a very short sentence.{IMAGE_TOKEN_SEP * len(image_urls)}[/INST]"
image_data = image_urls
modality = ["multi-images"]
pload = {
"text": prompt,
"image_data": image_data,
"sampling_params": {"max_new_tokens": 100, "temperature": 0.7, "top_p": 0.9},
"modalities": modality,
"stream": True,
}
response = requests.post(url, json=pload, stream=True)
print("Streaming response:")
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
parser.add_argument(
"--single-image",
action="store_true",
help="Test with single image instead of multiple images",
)
parser.add_argument("--no-stream", action="store_true", help="Don't test streaming")
args = parser.parse_args()
asyncio.run(test_concurrent(args))
if not args.no_stream:
test_streaming(args)
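# Illustrative aside: with two images, the multi-image prompt above expands to
#   <s>[INST]How many photos are there? Describe each in a very short sentence.\n[IMG]\n[IMG][/INST]
# i.e. one "\n[IMG]" placeholder per entry in image_data.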
"""
Usage:
# Install the latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Install the latest sglang.
# Endpoint Service CLI:
python -m sglang.launch_server --model-path lmms-lab/llava-next-72b --port=30000 --tp-size=8
python3 qwen_llava_server.py
Output:
"Two children pose with a large teddy bear, one holding a smaller stuffed bear, in a room with an American flag and potted plants."
"""
import argparse
import asyncio
import copy
import json
import aiohttp
import requests
from llava.conversation import conv_qwen
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_qwen)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
response = []
for i in range(1):
response.append(
send_request(
url + "/generate",
{
"text": prompt_with_template,
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|im_end|>",
},
},
)
)
rets = await asyncio.gather(*response)
for ret in rets:
print(ret["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_qwen)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
pload = {
"text": prompt_with_template,
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|im_end|>",
},
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"stream": True,
}
response = requests.post(
url + "/generate",
json=pload,
stream=True,
)
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
args = parser.parse_args()
asyncio.run(test_concurrent(args))
test_streaming(args)
# launch server
# python -m sglang.launch_server --model-path Alibaba-NLP/gme-Qwen2-VL-2B-Instruct --is-embedding
import requests
url = "http://127.0.0.1:30000"
text_input = "Represent this image in embedding space."
image_path = "https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild/resolve/main/images/023.jpg"
payload = {
"model": "gme-qwen2-vl",
"input": [{"text": text_input}, {"image": image_path}],
}
response = requests.post(url + "/v1/embeddings", json=payload).json()
print("Embeddings:", [x.get("embedding") for x in response.get("data", [])])
"""
Usage:
1) Launch the server in one terminal:
python -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --port 30000
2) Run this script in another terminal:
python openai_chat_with_response_prefill.py
This example demonstrates two chat completion calls:
- One with continue_final_message enabled (the final assistant message is used as a prefill).
- One without continue_final_message (the final assistant message remains, starting a new turn).
"""
import openai
client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")
messages = [
{"role": "system", "content": "You are a helpful AI assistant."},
{
"role": "user",
"content": """
Extract the name, size, price, and color from this product description as a JSON object:
<description>
The SmartHome Mini is a compact smart home assistant available in black or white for only $49.99.
At just 5 inches wide, it lets you control lights, thermostats, and other connected devices via voice or app—
no matter where you place it in your home.
This affordable little hub brings convenient hands-free control to your smart devices.
</description>
""",
},
{"role": "assistant", "content": "{\n"},
]
# Calling the API with continue_final_message enabled.
print("=== Prefill with continue_final_messagem ===")
response_with = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
temperature=0,
extra_body={"continue_final_message": True},
)
print(response_with.choices[0].message.content)
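# Illustrative follow-up sketch, assuming the API returns only the continuation
# when continue_final_message is enabled: the complete JSON object is the "{\n"
# prefill plus the newly generated text.
print("=== Reconstructed JSON ===")
print(messages[-1]["content"] + response_with.choices[0].message.content)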
# Calling the API without continue_final_message (using default behavior).
print("\n=== Prefill without continue_final_message ===")
response_without = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
temperature=0,
)
print(response_without.choices[0].message.content)
# launch server
# python -m sglang.launch_server --model LxzGordon/URM-LLaMa-3.1-8B --is-embedding
import requests
url = "http://127.0.0.1:30000"
PROMPT = (
"What is the range of the numeric output of a sigmoid node in a neural network?"
)
RESPONSE1 = "The output of a sigmoid node is bounded between -1 and 1."
RESPONSE2 = "The output of a sigmoid node is bounded between 0 and 1."
json_data = {
"conv": [
[
{"role": "user", "content": PROMPT},
{"role": "assistant", "content": RESPONSE1},
],
[
{"role": "user", "content": PROMPT},
{"role": "assistant", "content": RESPONSE2},
],
],
}
response = requests.post(
url + "/classify",
json=json_data,
).json()
print(response)
print("scores:", [x["embedding"] for x in response])
"""
This example demonstrates how to provide token ids to the LLM as input instead of a text prompt, i.e., a token-in-token-out workflow.
"""
import sglang as sgl
from sglang.srt.hf_transformers_utils import get_tokenizer
MODEL_PATH = "meta-llama/Llama-3.1-8B-Instruct"
def main():
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Tokenize inputs
tokenizer = get_tokenizer(MODEL_PATH)
token_ids_list = [tokenizer.encode(prompt) for prompt in prompts]
# Create an LLM.
llm = sgl.Engine(model_path=MODEL_PATH, skip_tokenizer_init=True)
outputs = llm.generate(input_ids=token_ids_list, sampling_params=sampling_params)
# Print the outputs.
for prompt, output in zip(prompts, outputs):
decode_output = tokenizer.decode(output["output_ids"])
print("===============================")
print(
f"Prompt: {prompt}\nGenerated token ids: {output['output_ids']}\nGenerated text: {decode_output}"
)
print()
# The __main__ guard is necessary here because we use "spawn" to create subprocesses.
# Spawn starts a fresh program every time; without the guard, sgl.Engine would keep
# spawning new processes in an infinite loop.
if __name__ == "__main__":
main()
"""
Usage:
python token_in_token_out_llm_server.py
"""
import requests
from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.test.test_utils import is_in_ci
from sglang.utils import terminate_process, wait_for_server
if is_in_ci():
from docs.backend.patch import launch_server_cmd
else:
from sglang.utils import launch_server_cmd
MODEL_PATH = "meta-llama/Llama-3.1-8B-Instruct"
def main():
# Launch the server
server_process, port = launch_server_cmd(
f"python -m sglang.launch_server --model-path {MODEL_PATH} --skip-tokenizer-init --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Tokenize inputs
tokenizer = get_tokenizer(MODEL_PATH)
token_ids_list = [tokenizer.encode(prompt) for prompt in prompts]
json_data = {
"input_ids": token_ids_list,
"sampling_params": sampling_params,
}
response = requests.post(
f"http://localhost:{port}/generate",
json=json_data,
)
outputs = response.json()
for prompt, output in zip(prompts, outputs):
print("===============================")
decode_output = tokenizer.decode(output["output_ids"])
print(
f"Prompt: {prompt}\nGenerated token ids: {output['output_ids']}\nGenerated text: {decode_output}"
)
print()
terminate_process(server_process)
if __name__ == "__main__":
main()
import argparse
import dataclasses
from typing import Tuple
from transformers import AutoProcessor
from sglang import Engine
from sglang.lang.chat_template import get_chat_template_by_model_path
from sglang.srt.configs.model_config import ModelConfig
from sglang.srt.server_args import ServerArgs
from sglang.test.test_utils import DEFAULT_IMAGE_URL
def get_input_ids(
server_args: ServerArgs, model_config: ModelConfig
) -> Tuple[list[int], list]:
chat_template = get_chat_template_by_model_path(model_config.model_path)
text = f"{chat_template.image_token}What is in this picture?"
image_data = [DEFAULT_IMAGE_URL]
processor = AutoProcessor.from_pretrained(
model_config.model_path, trust_remote_code=server_args.trust_remote_code
)
input_ids = (
processor.tokenizer(
text=[text],
return_tensors="pt",
)
.input_ids[0]
.tolist()
)
return input_ids, image_data
def token_in_out_example(
server_args: ServerArgs,
):
input_ids, image_data = get_input_ids(
server_args,
ModelConfig(
server_args.model_path,
trust_remote_code=server_args.trust_remote_code,
model_override_args=server_args.json_model_override_args,
),
)
backend = Engine(**dataclasses.asdict(server_args))
output = backend.generate(
input_ids=input_ids,
image_data=image_data,
sampling_params={
"temperature": 0.8,
"max_new_tokens": 32,
},
)
print("===============================")
print(f"Output token ids: ", output["output_ids"])
backend.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
ServerArgs.add_cli_args(parser)
args = [
"--model-path=Qwen/Qwen2-VL-2B",
]
args = parser.parse_args(args=args)
server_args = ServerArgs.from_cli_args(args)
server_args.skip_tokenizer_init = True
token_in_out_example(server_args)
"""
Usage:
python token_in_token_out_vlm_server.py
"""
from typing import Tuple
import requests
from transformers import AutoProcessor
from sglang.lang.chat_template import get_chat_template_by_model_path
from sglang.test.test_utils import DEFAULT_IMAGE_URL, is_in_ci
from sglang.utils import terminate_process, wait_for_server
if is_in_ci():
from docs.backend.patch import launch_server_cmd
else:
from sglang.utils import launch_server_cmd
MODEL_PATH = "Qwen/Qwen2-VL-2B"
def get_input_ids() -> Tuple[list[int], list]:
chat_template = get_chat_template_by_model_path(MODEL_PATH)
text = f"{chat_template.image_token}What is in this picture?"
image_data = [DEFAULT_IMAGE_URL]
processor = AutoProcessor.from_pretrained(MODEL_PATH)
input_ids = (
processor.tokenizer(
text=[text],
return_tensors="pt",
)
.input_ids[0]
.tolist()
)
return input_ids, image_data
def main():
# Launch the server
server_process, port = launch_server_cmd(
f"python -m sglang.launch_server --model-path {MODEL_PATH} --skip-tokenizer-init --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")
input_ids, image_data = get_input_ids()
sampling_params = {
"temperature": 0.8,
"max_new_tokens": 32,
}
json_data = {
"input_ids": input_ids,
"image_data": image_data,
"sampling_params": sampling_params,
}
response = requests.post(
f"http://localhost:{port}/generate",
json=json_data,
)
output = response.json()
print("===============================")
print(f"Output token ids: ", output["output_ids"])
terminate_process(server_process)
if __name__ == "__main__":
main()
"""
Usage:
python -m sglang.launch_server --model meta-llama/Llama-2-7b-hf --port 30000
python vertex_predict.py
This example shows the request and response formats of the prediction route for
Google Cloud Vertex AI Online Predictions.
The Vertex AI SDK for Python is recommended for deploying models to Vertex AI
rather than running a local server. After deploying the model to a Vertex AI Online
Prediction Endpoint, send requests via the Python SDK:
response = endpoint.predict(
instances=[
{"text": "The capital of France is"},
{"text": "What is a car?"},
],
parameters={"sampling_params": {"max_new_tokens": 16}},
)
print(response.predictions)
More details about getting online predictions from Vertex AI can be found at
https://cloud.google.com/vertex-ai/docs/predictions/get-online-predictions.
"""
from dataclasses import dataclass
from typing import List, Optional
import requests
@dataclass
class VertexPrediction:
predictions: List
class LocalVertexEndpoint:
def __init__(self) -> None:
self.base_url = "http://127.0.0.1:30000"
def predict(self, instances: List[dict], parameters: Optional[dict] = None):
response = requests.post(
self.base_url + "/vertex_generate",
json={
"instances": instances,
"parameters": parameters,
},
)
return VertexPrediction(predictions=response.json()["predictions"])
endpoint = LocalVertexEndpoint()
# Predict with a single prompt.
response = endpoint.predict(instances=[{"text": "The capital of France is"}])
print(response.predictions)
# Predict with multiple prompts and parameters.
response = endpoint.predict(
instances=[
{"text": "The capital of France is"},
{"text": "What is a car?"},
],
parameters={"sampling_params": {"max_new_tokens": 16}},
)
print(response.predictions)
{
"name": "sglang",
"lockfileVersion": 3,
"requires": true,
"packages": {}
}
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "sglang"
version = "0.5.2rc1"
description = "SGLang is yet another fast serving framework for large language models and vision language models."
readme = "README.md"
requires-python = ">=3.10"
license = { file = "LICENSE" }
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
]
dependencies = ["aiohttp", "requests", "tqdm", "numpy", "IPython", "setproctitle"]
[project.optional-dependencies]
runtime_common = [
"blobfile==3.0.0",
"build",
"compressed-tensors",
"datasets",
"einops",
"fastapi",
"hf_transfer",
"huggingface_hub",
"interegular",
"llguidance>=0.7.11,<0.8.0",
"modelscope",
"msgspec",
"ninja",
"openai==1.99.1",
"openai-harmony==0.0.4",
"orjson",
"outlines==0.1.11",
"packaging",
"partial_json_parser",
"pillow",
"prometheus-client>=0.20.0",
"psutil",
"pybase64",
"pydantic",
"pynvml",
"python-multipart",
"pyzmq>=25.1.2",
"sentencepiece",
"soundfile==0.13.1",
"scipy",
"timm==1.0.16",
"tiktoken",
"torchao==0.9.0",
"transformers==4.56.0",
"uvicorn",
"uvloop",
"xgrammar==0.1.23",
]
srt = [
"sglang[runtime_common]",
"sgl-kernel==0.3.8",
"torch==2.8.0",
"torchaudio==2.8.0",
"torchvision",
"cuda-python",
"flashinfer_python==0.3.0",
]
blackwell = [
"sglang[runtime_common]",
"sgl-kernel",
"torch==2.8.0",
"torchaudio==2.8.0",
"torchvision",
"cuda-python",
"flashinfer_python==0.3.0",
]
# HIP (Heterogeneous-computing Interface for Portability) for AMD
# => uses the base docker rocm/vllm-dev:20250114, not the public vllm wheel
srt_hip = [
"sglang[runtime_common]",
"torch",
"petit_kernel==0.0.2",
"wave-lang==1.0.1",
]
# https://docs.sglang.ai/platforms/cpu_server.html
srt_cpu = ["sglang[runtime_common]"]
# https://docs.sglang.ai/platforms/ascend_npu.html
srt_npu = ["sglang[runtime_common]"]
# xpu is not enabled in the public vllm and torch wheels;
# follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
srt_xpu = ["sglang[runtime_common]"]
# For Intel Gaudi (device: hpu), follow the installation guide
# https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
srt_hpu = ["sglang[runtime_common]"]
openai = ["openai==1.99.1", "tiktoken"]
anthropic = ["anthropic>=0.20.0"]
litellm = ["litellm>=1.0.0"]
torch_memory_saver = ["torch_memory_saver==0.0.8"]
decord = ["decord"]
test = [
"accelerate",
"expecttest",
"jsonlines",
"matplotlib",
"pandas",
"peft",
"sentence_transformers",
"pytest",
"tabulate",
]
all = ["sglang[srt]", "sglang[openai]", "sglang[anthropic]", "sglang[torch_memory_saver]", "sglang[decord]"]
all_hip = ["sglang[srt_hip]", "sglang[openai]", "sglang[anthropic]", "sglang[decord]"]
all_xpu = ["sglang[srt_xpu]", "sglang[openai]", "sglang[anthropic]", "sglang[decord]"]
all_hpu = ["sglang[srt_hpu]", "sglang[openai]", "sglang[anthropic]", "sglang[decord]"]
all_cpu = ["sglang[srt_cpu]", "sglang[openai]", "sglang[anthropic]", "sglang[decord]"]
all_npu = ["sglang[srt_npu]", "sglang[openai]", "sglang[anthropic]", "sglang[decord]"]
dev = ["sglang[all]", "sglang[test]"]
dev_hip = ["sglang[all_hip]", "sglang[test]"]
dev_xpu = ["sglang[all_xpu]", "sglang[test]"]
dev_hpu = ["sglang[all_hpu]", "sglang[test]"]
dev_cpu = ["sglang[all_cpu]", "sglang[test]"]
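# Illustrative aside (comment only): these extras compose when installing, e.g.
#   pip install "sglang[all]"   # srt (CUDA stack) plus the client extras
#   pip install "sglang[dev]"   # everything in "all" plus the test extras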
[project.urls]
"Homepage" = "https://github.com/sgl-project/sglang"
"Bug Tracker" = "https://github.com/sgl-project/sglang/issues"
[tool.setuptools.package-data]
"sglang" = [
"srt/layers/moe/fused_moe_triton/configs/*/*.json",
"srt/layers/quantization/configs/*.json",
"srt/mem_cache/storage/hf3fs/hf3fs_utils.cpp",
]
[tool.setuptools.packages.find]
exclude = [
"assets*",
"benchmark*",
"docs*",
"dist*",
"playground*",
"scripts*",
"tests*",
]
[tool.wheel]
exclude = [
"assets*",
"benchmark*",
"docs*",
"dist*",
"playground*",
"scripts*",
"tests*",
]
[tool.codespell]
ignore-words-list = "ans, als, hel, boostrap, childs, te, vas, hsa, ment"
skip = "*.json,*.jsonl,*.patch,*.txt"
Metadata-Version: 2.4
Name: sglang
Version: 0.5.2rc1
Summary: SGLang is yet another fast serving framework for large language models and vision language models.
Project-URL: Homepage, https://github.com/sgl-project/sglang
Project-URL: Bug Tracker, https://github.com/sgl-project/sglang/issues
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: Apache Software License
Requires-Python: >=3.10
Description-Content-Type: text/markdown
Requires-Dist: aiohttp
Requires-Dist: requests
Requires-Dist: tqdm
Requires-Dist: numpy
Requires-Dist: IPython
Requires-Dist: setproctitle
Provides-Extra: runtime-common
Requires-Dist: blobfile==3.0.0; extra == "runtime-common"
Requires-Dist: build; extra == "runtime-common"
Requires-Dist: compressed-tensors; extra == "runtime-common"
Requires-Dist: datasets; extra == "runtime-common"
Requires-Dist: einops; extra == "runtime-common"
Requires-Dist: fastapi; extra == "runtime-common"
Requires-Dist: hf_transfer; extra == "runtime-common"
Requires-Dist: huggingface_hub; extra == "runtime-common"
Requires-Dist: interegular; extra == "runtime-common"
Requires-Dist: llguidance<0.8.0,>=0.7.11; extra == "runtime-common"
Requires-Dist: modelscope; extra == "runtime-common"
Requires-Dist: msgspec; extra == "runtime-common"
Requires-Dist: ninja; extra == "runtime-common"
Requires-Dist: openai==1.99.1; extra == "runtime-common"
Requires-Dist: openai-harmony==0.0.4; extra == "runtime-common"
Requires-Dist: orjson; extra == "runtime-common"
Requires-Dist: outlines==0.1.11; extra == "runtime-common"
Requires-Dist: packaging; extra == "runtime-common"
Requires-Dist: partial_json_parser; extra == "runtime-common"
Requires-Dist: pillow; extra == "runtime-common"
Requires-Dist: prometheus-client>=0.20.0; extra == "runtime-common"
Requires-Dist: psutil; extra == "runtime-common"
Requires-Dist: pybase64; extra == "runtime-common"
Requires-Dist: pydantic; extra == "runtime-common"
Requires-Dist: pynvml; extra == "runtime-common"
Requires-Dist: python-multipart; extra == "runtime-common"
Requires-Dist: pyzmq>=25.1.2; extra == "runtime-common"
Requires-Dist: sentencepiece; extra == "runtime-common"
Requires-Dist: soundfile==0.13.1; extra == "runtime-common"
Requires-Dist: scipy; extra == "runtime-common"
Requires-Dist: timm==1.0.16; extra == "runtime-common"
Requires-Dist: tiktoken; extra == "runtime-common"
Requires-Dist: torchao==0.9.0; extra == "runtime-common"
Requires-Dist: transformers==4.56.0; extra == "runtime-common"
Requires-Dist: uvicorn; extra == "runtime-common"
Requires-Dist: uvloop; extra == "runtime-common"
Requires-Dist: xgrammar==0.1.23; extra == "runtime-common"
Provides-Extra: srt
Requires-Dist: sglang[runtime_common]; extra == "srt"
Requires-Dist: sgl-kernel==0.3.8; extra == "srt"
Requires-Dist: torch==2.8.0; extra == "srt"
Requires-Dist: torchaudio==2.8.0; extra == "srt"
Requires-Dist: torchvision; extra == "srt"
Requires-Dist: cuda-python; extra == "srt"
Requires-Dist: flashinfer_python==0.3.0; extra == "srt"
Provides-Extra: blackwell
Requires-Dist: sglang[runtime_common]; extra == "blackwell"
Requires-Dist: sgl-kernel; extra == "blackwell"
Requires-Dist: torch==2.8.0; extra == "blackwell"
Requires-Dist: torchaudio==2.8.0; extra == "blackwell"
Requires-Dist: torchvision; extra == "blackwell"
Requires-Dist: cuda-python; extra == "blackwell"
Requires-Dist: flashinfer_python==0.3.0; extra == "blackwell"
Provides-Extra: srt-hip
Requires-Dist: sglang[runtime_common]; extra == "srt-hip"
Requires-Dist: torch; extra == "srt-hip"
Requires-Dist: petit_kernel==0.0.2; extra == "srt-hip"
Requires-Dist: wave-lang==1.0.1; extra == "srt-hip"
Provides-Extra: srt-cpu
Requires-Dist: sglang[runtime_common]; extra == "srt-cpu"
Provides-Extra: srt-npu
Requires-Dist: sglang[runtime_common]; extra == "srt-npu"
Provides-Extra: srt-xpu
Requires-Dist: sglang[runtime_common]; extra == "srt-xpu"
Provides-Extra: srt-hpu
Requires-Dist: sglang[runtime_common]; extra == "srt-hpu"
Provides-Extra: openai
Requires-Dist: openai==1.99.1; extra == "openai"
Requires-Dist: tiktoken; extra == "openai"
Provides-Extra: anthropic
Requires-Dist: anthropic>=0.20.0; extra == "anthropic"
Provides-Extra: litellm
Requires-Dist: litellm>=1.0.0; extra == "litellm"
Provides-Extra: torch-memory-saver
Requires-Dist: torch_memory_saver==0.0.8; extra == "torch-memory-saver"
Provides-Extra: decord
Requires-Dist: decord; extra == "decord"
Provides-Extra: test
Requires-Dist: accelerate; extra == "test"
Requires-Dist: expecttest; extra == "test"
Requires-Dist: jsonlines; extra == "test"
Requires-Dist: matplotlib; extra == "test"
Requires-Dist: pandas; extra == "test"
Requires-Dist: peft; extra == "test"
Requires-Dist: sentence_transformers; extra == "test"
Requires-Dist: pytest; extra == "test"
Requires-Dist: tabulate; extra == "test"
Provides-Extra: all
Requires-Dist: sglang[srt]; extra == "all"
Requires-Dist: sglang[openai]; extra == "all"
Requires-Dist: sglang[anthropic]; extra == "all"
Requires-Dist: sglang[torch_memory_saver]; extra == "all"
Requires-Dist: sglang[decord]; extra == "all"
Provides-Extra: all-hip
Requires-Dist: sglang[srt_hip]; extra == "all-hip"
Requires-Dist: sglang[openai]; extra == "all-hip"
Requires-Dist: sglang[anthropic]; extra == "all-hip"
Requires-Dist: sglang[decord]; extra == "all-hip"
Provides-Extra: all-xpu
Requires-Dist: sglang[srt_xpu]; extra == "all-xpu"
Requires-Dist: sglang[openai]; extra == "all-xpu"
Requires-Dist: sglang[anthropic]; extra == "all-xpu"
Requires-Dist: sglang[decord]; extra == "all-xpu"
Provides-Extra: all-hpu
Requires-Dist: sglang[srt_hpu]; extra == "all-hpu"
Requires-Dist: sglang[openai]; extra == "all-hpu"
Requires-Dist: sglang[anthropic]; extra == "all-hpu"
Requires-Dist: sglang[decord]; extra == "all-hpu"
Provides-Extra: all-cpu
Requires-Dist: sglang[srt_cpu]; extra == "all-cpu"
Requires-Dist: sglang[openai]; extra == "all-cpu"
Requires-Dist: sglang[anthropic]; extra == "all-cpu"
Requires-Dist: sglang[decord]; extra == "all-cpu"
Provides-Extra: all-npu
Requires-Dist: sglang[srt_npu]; extra == "all-npu"
Requires-Dist: sglang[openai]; extra == "all-npu"
Requires-Dist: sglang[anthropic]; extra == "all-npu"
Requires-Dist: sglang[decord]; extra == "all-npu"
Provides-Extra: dev
Requires-Dist: sglang[all]; extra == "dev"
Requires-Dist: sglang[test]; extra == "dev"
Provides-Extra: dev-hip
Requires-Dist: sglang[all_hip]; extra == "dev-hip"
Requires-Dist: sglang[test]; extra == "dev-hip"
Provides-Extra: dev-xpu
Requires-Dist: sglang[all_xpu]; extra == "dev-xpu"
Requires-Dist: sglang[test]; extra == "dev-xpu"
Provides-Extra: dev-hpu
Requires-Dist: sglang[all_hpu]; extra == "dev-hpu"
Requires-Dist: sglang[test]; extra == "dev-hpu"
Provides-Extra: dev-cpu
Requires-Dist: sglang[all_cpu]; extra == "dev-cpu"
Requires-Dist: sglang[test]; extra == "dev-cpu"
pyproject.toml
sglang/__init__.py
sglang/bench_offline_throughput.py
sglang/bench_one_batch.py
sglang/bench_one_batch_server.py
sglang/bench_serving.py
sglang/check_env.py
sglang/compile_deep_gemm.py
sglang/global_config.py
sglang/launch_server.py
sglang/profiler.py
sglang/utils.py
sglang/version.py
sglang.egg-info/PKG-INFO
sglang.egg-info/SOURCES.txt
sglang.egg-info/dependency_links.txt
sglang.egg-info/requires.txt
sglang.egg-info/top_level.txt
sglang/eval/llama3_eval.py
sglang/eval/loogle_eval.py
sglang/lang/api.py
sglang/lang/chat_template.py
sglang/lang/choices.py
sglang/lang/compiler.py
sglang/lang/interpreter.py
sglang/lang/ir.py
sglang/lang/tracer.py
sglang/lang/backend/anthropic.py
sglang/lang/backend/base_backend.py
sglang/lang/backend/litellm.py
sglang/lang/backend/openai.py
sglang/lang/backend/runtime_endpoint.py
sglang/lang/backend/vertexai.py
sglang/srt/_custom_ops.py
sglang/srt/aio_rwlock.py
sglang/srt/bench_utils.py
sglang/srt/constants.py
sglang/srt/custom_op.py
sglang/srt/hf_transformers_utils.py
sglang/srt/host_shared_memory.py
sglang/srt/offloader.py
sglang/srt/operations.py
sglang/srt/operations_strategy.py
sglang/srt/patch_torch.py
sglang/srt/poll_based_barrier.py
sglang/srt/server_args.py
sglang/srt/torch_memory_saver_adapter.py
sglang/srt/two_batch_overlap.py
sglang/srt/utils.py
sglang/srt/warmup.py
sglang/srt/configs/__init__.py
sglang/srt/configs/chatglm.py
sglang/srt/configs/dbrx.py
sglang/srt/configs/deepseekvl2.py
sglang/srt/configs/device_config.py
sglang/srt/configs/exaone.py
sglang/srt/configs/internvl.py
sglang/srt/configs/janus_pro.py
sglang/srt/configs/kimi_vl.py
sglang/srt/configs/kimi_vl_moonvit.py
sglang/srt/configs/load_config.py
sglang/srt/configs/longcat_flash.py
sglang/srt/configs/model_config.py
sglang/srt/configs/step3_vl.py
sglang/srt/configs/update_config.py
sglang/srt/configs/utils.py
sglang/srt/connector/__init__.py
sglang/srt/connector/base_connector.py
sglang/srt/connector/redis.py
sglang/srt/connector/s3.py
sglang/srt/connector/utils.py
sglang/srt/connector/serde/__init__.py
sglang/srt/connector/serde/safe_serde.py
sglang/srt/connector/serde/serde.py
sglang/srt/constrained/base_grammar_backend.py
sglang/srt/constrained/llguidance_backend.py
sglang/srt/constrained/outlines_backend.py
sglang/srt/constrained/outlines_jump_forward.py
sglang/srt/constrained/reasoner_grammar_backend.py
sglang/srt/constrained/xgrammar_backend.py
sglang/srt/constrained/triton_ops/bitmask_ops.py
sglang/srt/debug_utils/__init__.py
sglang/srt/debug_utils/dump_comparator.py
sglang/srt/debug_utils/dumper.py
sglang/srt/debug_utils/text_comparator.py
sglang/srt/disaggregation/decode.py
sglang/srt/disaggregation/decode_schedule_batch_mixin.py
sglang/srt/disaggregation/kv_events.py
sglang/srt/disaggregation/launch_lb.py
sglang/srt/disaggregation/mini_lb.py
sglang/srt/disaggregation/prefill.py
sglang/srt/disaggregation/utils.py
sglang/srt/disaggregation/ascend/__init__.py
sglang/srt/disaggregation/ascend/conn.py
sglang/srt/disaggregation/ascend/transfer_engine.py
sglang/srt/disaggregation/base/__init__.py
sglang/srt/disaggregation/base/conn.py
sglang/srt/disaggregation/common/__init__.py
sglang/srt/disaggregation/common/conn.py
sglang/srt/disaggregation/common/utils.py
sglang/srt/disaggregation/fake/__init__.py
sglang/srt/disaggregation/fake/conn.py
sglang/srt/disaggregation/mooncake/__init__.py
sglang/srt/disaggregation/mooncake/conn.py
sglang/srt/disaggregation/mooncake/transfer_engine.py
sglang/srt/disaggregation/nixl/__init__.py
sglang/srt/disaggregation/nixl/conn.py
sglang/srt/distributed/__init__.py
sglang/srt/distributed/communication_op.py
sglang/srt/distributed/naive_distributed.py
sglang/srt/distributed/parallel_state.py
sglang/srt/distributed/utils.py
sglang/srt/distributed/device_communicators/cuda_wrapper.py
sglang/srt/distributed/device_communicators/custom_all_reduce.py
sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py
sglang/srt/distributed/device_communicators/hpu_communicator.py
sglang/srt/distributed/device_communicators/npu_communicator.py
sglang/srt/distributed/device_communicators/pymscclpp.py
sglang/srt/distributed/device_communicators/pynccl.py
sglang/srt/distributed/device_communicators/pynccl_allocator.py
sglang/srt/distributed/device_communicators/pynccl_wrapper.py
sglang/srt/distributed/device_communicators/quick_all_reduce.py
sglang/srt/distributed/device_communicators/shm_broadcast.py
sglang/srt/distributed/device_communicators/xpu_communicator.py
sglang/srt/entrypoints/EngineBase.py
sglang/srt/entrypoints/context.py
sglang/srt/entrypoints/engine.py
sglang/srt/entrypoints/harmony_utils.py
sglang/srt/entrypoints/http_server.py
sglang/srt/entrypoints/http_server_engine.py
sglang/srt/entrypoints/tool.py
sglang/srt/entrypoints/openai/__init__.py
sglang/srt/entrypoints/openai/protocol.py
sglang/srt/entrypoints/openai/serving_base.py
sglang/srt/entrypoints/openai/serving_chat.py
sglang/srt/entrypoints/openai/serving_completions.py
sglang/srt/entrypoints/openai/serving_embedding.py
sglang/srt/entrypoints/openai/serving_rerank.py
sglang/srt/entrypoints/openai/serving_responses.py
sglang/srt/entrypoints/openai/serving_score.py
sglang/srt/entrypoints/openai/tool_server.py
sglang/srt/entrypoints/openai/usage_processor.py
sglang/srt/entrypoints/openai/utils.py
sglang/srt/eplb/__init__.py
sglang/srt/eplb/eplb_manager.py
sglang/srt/eplb/expert_distribution.py
sglang/srt/eplb/expert_location.py
sglang/srt/eplb/expert_location_dispatch.py
sglang/srt/eplb/expert_location_updater.py
sglang/srt/eplb/eplb_algorithms/__init__.py
sglang/srt/eplb/eplb_algorithms/deepseek.py
sglang/srt/eplb/eplb_algorithms/deepseek_vec.py
sglang/srt/eplb/eplb_simulator/__init__.py
sglang/srt/eplb/eplb_simulator/reader.py
sglang/srt/function_call/base_format_detector.py
sglang/srt/function_call/core_types.py
sglang/srt/function_call/deepseekv31_detector.py
sglang/srt/function_call/deepseekv3_detector.py
sglang/srt/function_call/ebnf_composer.py
sglang/srt/function_call/function_call_parser.py
sglang/srt/function_call/glm4_moe_detector.py
sglang/srt/function_call/gpt_oss_detector.py
sglang/srt/function_call/kimik2_detector.py
sglang/srt/function_call/llama32_detector.py
sglang/srt/function_call/mistral_detector.py
sglang/srt/function_call/pythonic_detector.py
sglang/srt/function_call/qwen25_detector.py
sglang/srt/function_call/qwen3_coder_detector.py
sglang/srt/function_call/step3_detector.py
sglang/srt/function_call/utils.py
sglang/srt/layers/activation.py
sglang/srt/layers/amx_utils.py
sglang/srt/layers/communicator.py
sglang/srt/layers/dp_attention.py
sglang/srt/layers/elementwise.py
sglang/srt/layers/flashinfer_comm_fusion.py
sglang/srt/layers/layernorm.py
sglang/srt/layers/linear.py
sglang/srt/layers/logits_processor.py
sglang/srt/layers/model_parallel.py
sglang/srt/layers/multimodal.py
sglang/srt/layers/parameter.py
sglang/srt/layers/pooler.py
sglang/srt/layers/radix_attention.py
sglang/srt/layers/rotary_embedding.py
sglang/srt/layers/sampler.py
sglang/srt/layers/torchao_utils.py
sglang/srt/layers/utils.py
sglang/srt/layers/vocab_parallel_embedding.py
sglang/srt/layers/attention/aiter_backend.py
sglang/srt/layers/attention/ascend_backend.py
sglang/srt/layers/attention/base_attn_backend.py
sglang/srt/layers/attention/cutlass_mla_backend.py
sglang/srt/layers/attention/double_sparsity_backend.py
sglang/srt/layers/attention/dual_chunk_flashattention_backend.py
sglang/srt/layers/attention/flashattention_backend.py
sglang/srt/layers/attention/flashinfer_backend.py
sglang/srt/layers/attention/flashinfer_mla_backend.py
sglang/srt/layers/attention/flashmla_backend.py
sglang/srt/layers/attention/hybrid_attn_backend.py
sglang/srt/layers/attention/intel_amx_backend.py
sglang/srt/layers/attention/merge_state.py
sglang/srt/layers/attention/tbo_backend.py
sglang/srt/layers/attention/torch_native_backend.py
sglang/srt/layers/attention/triton_backend.py
sglang/srt/layers/attention/trtllm_mha_backend.py
sglang/srt/layers/attention/trtllm_mla_backend.py
sglang/srt/layers/attention/utils.py
sglang/srt/layers/attention/vision.py
sglang/srt/layers/attention/vision_utils.py
sglang/srt/layers/attention/wave_backend.py
sglang/srt/layers/attention/triton_ops/decode_attention.py
sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py
sglang/srt/layers/attention/triton_ops/extend_attention.py
sglang/srt/layers/attention/triton_ops/merge_state.py
sglang/srt/layers/attention/triton_ops/prefill_attention.py
sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py
sglang/srt/layers/attention/wave_ops/decode_attention.py
sglang/srt/layers/attention/wave_ops/extend_attention.py
sglang/srt/layers/attention/wave_ops/prefill_attention.py
sglang/srt/layers/moe/__init__.py
sglang/srt/layers/moe/cutlass_moe.py
sglang/srt/layers/moe/cutlass_moe_params.py
sglang/srt/layers/moe/cutlass_w4a8_moe.py
sglang/srt/layers/moe/fused_moe_native.py
sglang/srt/layers/moe/rocm_moe_utils.py
sglang/srt/layers/moe/router.py
sglang/srt/layers/moe/topk.py
sglang/srt/layers/moe/utils.py
sglang/srt/layers/moe/ep_moe/__init__.py
sglang/srt/layers/moe/ep_moe/kernels.py
sglang/srt/layers/moe/ep_moe/layer.py
sglang/srt/layers/moe/fused_moe_triton/__init__.py
sglang/srt/layers/moe/fused_moe_triton/fused_moe.py
sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py
sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_kernels.py
sglang/srt/layers/moe/fused_moe_triton/layer.py
sglang/srt/layers/moe/fused_moe_triton/moe_align_block_size.py
sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=129,N=352,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=160,N=320,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=161,N=192,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_0/E=16,N=1024,device_name=NVIDIA_B200.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=352,device_name=NVIDIA_RTX_6000_Ada_Generation,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=768,device_name=NVIDIA_H20.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=320,device_name=NVIDIA_H20-3e.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=352,device_name=NVIDIA_B200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=352,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Max-Q_Workstation_Edition,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=161,N=384,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Max-Q_Workstation_Edition,dtype=fp8_w8a8.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=64,device_name=NVIDIA_A100-SXM4-80GB.json
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/moe/moe_runner/__init__.py
sglang/srt/layers/moe/moe_runner/base.py
sglang/srt/layers/moe/token_dispatcher/__init__.py
sglang/srt/layers/moe/token_dispatcher/base_dispatcher.py
sglang/srt/layers/moe/token_dispatcher/deepep.py
sglang/srt/layers/moe/token_dispatcher/standard.py
sglang/srt/layers/quantization/__init__.py
sglang/srt/layers/quantization/awq.py
sglang/srt/layers/quantization/awq_triton.py
sglang/srt/layers/quantization/base_config.py
sglang/srt/layers/quantization/blockwise_int8.py
sglang/srt/layers/quantization/fp8.py
sglang/srt/layers/quantization/fp8_kernel.py
sglang/srt/layers/quantization/fp8_utils.py
sglang/srt/layers/quantization/fpgemm_fp8.py
sglang/srt/layers/quantization/gptq.py
sglang/srt/layers/quantization/int8_kernel.py
sglang/srt/layers/quantization/int8_utils.py
sglang/srt/layers/quantization/kv_cache.py
sglang/srt/layers/quantization/marlin_utils.py
sglang/srt/layers/quantization/marlin_utils_fp8.py
sglang/srt/layers/quantization/modelopt_quant.py
sglang/srt/layers/quantization/moe_wna16.py
sglang/srt/layers/quantization/mxfp4.py
sglang/srt/layers/quantization/mxfp4_tensor.py
sglang/srt/layers/quantization/petit.py
sglang/srt/layers/quantization/petit_utils.py
sglang/srt/layers/quantization/qoq.py
sglang/srt/layers/quantization/unquant.py
sglang/srt/layers/quantization/utils.py
sglang/srt/layers/quantization/w4afp8.py
sglang/srt/layers/quantization/w8a8_fp8.py
sglang/srt/layers/quantization/w8a8_int8.py
sglang/srt/layers/quantization/compressed_tensors/__init__.py
sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py
sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py
sglang/srt/layers/quantization/compressed_tensors/utils.py
sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py
sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py
sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py
sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json
sglang/srt/layers/quantization/deep_gemm_wrapper/__init__.py
sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py
sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py
sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py
sglang/srt/layers/quantization/quark/__init__.py
sglang/srt/layers/quantization/quark/quark.py
sglang/srt/layers/quantization/quark/quark_moe.py
sglang/srt/layers/quantization/quark/utils.py
sglang/srt/layers/quantization/quark/schemes/__init__.py
sglang/srt/layers/quantization/quark/schemes/quark_scheme.py
sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py
sglang/srt/lora/layers.py
sglang/srt/lora/lora.py
sglang/srt/lora/lora_config.py
sglang/srt/lora/lora_manager.py
sglang/srt/lora/lora_registry.py
sglang/srt/lora/mem_pool.py
sglang/srt/lora/utils.py
sglang/srt/lora/backend/base_backend.py
sglang/srt/lora/backend/triton_backend.py
sglang/srt/lora/triton_ops/__init__.py
sglang/srt/lora/triton_ops/gate_up_lora_b.py
sglang/srt/lora/triton_ops/qkv_lora_b.py
sglang/srt/lora/triton_ops/sgemm_lora_a.py
sglang/srt/lora/triton_ops/sgemm_lora_b.py
sglang/srt/managers/cache_controller.py
sglang/srt/managers/configure_logging.py
sglang/srt/managers/data_parallel_controller.py
sglang/srt/managers/detokenizer_manager.py
sglang/srt/managers/io_struct.py
sglang/srt/managers/mm_utils.py
sglang/srt/managers/multi_tokenizer_mixin.py
sglang/srt/managers/multimodal_processor.py
sglang/srt/managers/schedule_batch.py
sglang/srt/managers/schedule_policy.py
sglang/srt/managers/scheduler.py
sglang/srt/managers/scheduler_input_blocker.py
sglang/srt/managers/scheduler_metrics_mixin.py
sglang/srt/managers/scheduler_output_processor_mixin.py
sglang/srt/managers/scheduler_profiler_mixin.py
sglang/srt/managers/scheduler_recv_skipper.py
sglang/srt/managers/scheduler_update_weights_mixin.py
sglang/srt/managers/session_controller.py
sglang/srt/managers/template_manager.py
sglang/srt/managers/tokenizer_manager.py
sglang/srt/managers/tp_worker.py
sglang/srt/managers/tp_worker_overlap_thread.py
sglang/srt/managers/utils.py
sglang/srt/mem_cache/allocator.py
sglang/srt/mem_cache/allocator_ascend.py
sglang/srt/mem_cache/base_prefix_cache.py
sglang/srt/mem_cache/chunk_cache.py
sglang/srt/mem_cache/flush_cache.py
sglang/srt/mem_cache/hicache_storage.py
sglang/srt/mem_cache/hiradix_cache.py
sglang/srt/mem_cache/lora_radix_cache.py
sglang/srt/mem_cache/memory_pool.py
sglang/srt/mem_cache/memory_pool_host.py
sglang/srt/mem_cache/multimodal_cache.py
sglang/srt/mem_cache/radix_cache.py
sglang/srt/mem_cache/radix_cache_cpp.py
sglang/srt/mem_cache/swa_radix_cache.py
sglang/srt/mem_cache/cpp_radix_tree/radix_tree.py
sglang/srt/mem_cache/storage/hf3fs/client_hf3fs.py
sglang/srt/mem_cache/storage/hf3fs/hf3fs_utils.cpp
sglang/srt/mem_cache/storage/hf3fs/mini_3fs_metadata_server.py
sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py
sglang/srt/mem_cache/storage/hf3fs/test_hf3fs_utils.py
sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py
sglang/srt/mem_cache/storage/mooncake_store/unit_test.py
sglang/srt/mem_cache/storage/nixl/hicache_nixl.py
sglang/srt/mem_cache/storage/nixl/nixl_utils.py
sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py
sglang/srt/metrics/collector.py
sglang/srt/metrics/func_timer.py
sglang/srt/model_executor/cuda_graph_runner.py
sglang/srt/model_executor/forward_batch_info.py
sglang/srt/model_executor/model_runner.py
sglang/srt/model_executor/npu_graph_runner.py
sglang/srt/model_loader/__init__.py
sglang/srt/model_loader/loader.py
sglang/srt/model_loader/utils.py
sglang/srt/model_loader/weight_utils.py
sglang/srt/models/arcee.py
sglang/srt/models/baichuan.py
sglang/srt/models/bailing_moe.py
sglang/srt/models/bert.py
sglang/srt/models/chatglm.py
sglang/srt/models/clip.py
sglang/srt/models/commandr.py
sglang/srt/models/dbrx.py
sglang/srt/models/deepseek.py
sglang/srt/models/deepseek_janus_pro.py
sglang/srt/models/deepseek_nextn.py
sglang/srt/models/deepseek_v2.py
sglang/srt/models/deepseek_vl2.py
sglang/srt/models/ernie4.py
sglang/srt/models/ernie4_eagle.py
sglang/srt/models/exaone.py
sglang/srt/models/gemma.py
sglang/srt/models/gemma2.py
sglang/srt/models/gemma2_reward.py
sglang/srt/models/gemma3_causal.py
sglang/srt/models/gemma3_mm.py
sglang/srt/models/gemma3n_audio.py
sglang/srt/models/gemma3n_causal.py
sglang/srt/models/gemma3n_mm.py
sglang/srt/models/glm4.py
sglang/srt/models/glm4_moe.py
sglang/srt/models/glm4_moe_nextn.py
sglang/srt/models/glm4v.py
sglang/srt/models/glm4v_moe.py
sglang/srt/models/gpt2.py
sglang/srt/models/gpt_bigcode.py
sglang/srt/models/gpt_oss.py
sglang/srt/models/granite.py
sglang/srt/models/granitemoe.py
sglang/srt/models/grok.py
sglang/srt/models/hunyuan.py
sglang/srt/models/idefics2.py
sglang/srt/models/internlm2.py
sglang/srt/models/internlm2_reward.py
sglang/srt/models/interns1.py
sglang/srt/models/internvl.py
sglang/srt/models/kimi_vl.py
sglang/srt/models/kimi_vl_moonvit.py
sglang/srt/models/llama.py
sglang/srt/models/llama4.py
sglang/srt/models/llama_classification.py
sglang/srt/models/llama_eagle.py
sglang/srt/models/llama_eagle3.py
sglang/srt/models/llama_embedding.py
sglang/srt/models/llama_reward.py
sglang/srt/models/llava.py
sglang/srt/models/llavavid.py
sglang/srt/models/longcat_flash.py
sglang/srt/models/longcat_flash_nextn.py
sglang/srt/models/mimo.py
sglang/srt/models/mimo_mtp.py
sglang/srt/models/minicpm.py
sglang/srt/models/minicpm3.py
sglang/srt/models/minicpmo.py
sglang/srt/models/minicpmv.py
sglang/srt/models/mistral.py
sglang/srt/models/mixtral.py
sglang/srt/models/mixtral_quant.py
sglang/srt/models/mllama.py
sglang/srt/models/mllama4.py
sglang/srt/models/nemotron_nas.py
sglang/srt/models/olmo.py
sglang/srt/models/olmo2.py
sglang/srt/models/olmoe.py
sglang/srt/models/persimmon.py
sglang/srt/models/phi.py
sglang/srt/models/phi3_small.py
sglang/srt/models/phi4mm.py
sglang/srt/models/phi4mm_audio.py
sglang/srt/models/phi4mm_utils.py
sglang/srt/models/phimoe.py
sglang/srt/models/pixtral.py
sglang/srt/models/qwen.py
sglang/srt/models/qwen2.py
sglang/srt/models/qwen2_5_vl.py
sglang/srt/models/qwen2_audio.py
sglang/srt/models/qwen2_classification.py
sglang/srt/models/qwen2_eagle.py
sglang/srt/models/qwen2_moe.py
sglang/srt/models/qwen2_rm.py
sglang/srt/models/qwen2_vl.py
sglang/srt/models/qwen3.py
sglang/srt/models/qwen3_classification.py
sglang/srt/models/qwen3_moe.py
sglang/srt/models/registry.py
sglang/srt/models/roberta.py
sglang/srt/models/siglip.py
sglang/srt/models/stablelm.py
sglang/srt/models/step3_vl.py
sglang/srt/models/torch_native_llama.py
sglang/srt/models/transformers.py
sglang/srt/models/vila.py
sglang/srt/models/xverse.py
sglang/srt/models/xverse_moe.py
sglang/srt/models/yivl.py
sglang/srt/multimodal/mm_utils.py
sglang/srt/multimodal/processors/base_processor.py
sglang/srt/multimodal/processors/clip.py
sglang/srt/multimodal/processors/deepseek_vl_v2.py
sglang/srt/multimodal/processors/gemma3.py
sglang/srt/multimodal/processors/gemma3n.py
sglang/srt/multimodal/processors/glm4v.py
sglang/srt/multimodal/processors/internvl.py
sglang/srt/multimodal/processors/janus_pro.py
sglang/srt/multimodal/processors/kimi_vl.py
sglang/srt/multimodal/processors/llava.py
sglang/srt/multimodal/processors/minicpm.py
sglang/srt/multimodal/processors/mlama.py
sglang/srt/multimodal/processors/mllama4.py
sglang/srt/multimodal/processors/phi4mm.py
sglang/srt/multimodal/processors/pixtral.py
sglang/srt/multimodal/processors/qwen_audio.py
sglang/srt/multimodal/processors/qwen_vl.py
sglang/srt/multimodal/processors/step3_vl.py
sglang/srt/multimodal/processors/vila.py
sglang/srt/parser/code_completion_parser.py
sglang/srt/parser/conversation.py
sglang/srt/parser/harmony_parser.py
sglang/srt/parser/jinja_template_utils.py
sglang/srt/parser/reasoning_parser.py
sglang/srt/sampling/custom_logit_processor.py
sglang/srt/sampling/sampling_batch_info.py
sglang/srt/sampling/sampling_params.py
sglang/srt/sampling/penaltylib/__init__.py
sglang/srt/sampling/penaltylib/frequency_penalty.py
sglang/srt/sampling/penaltylib/min_new_tokens.py
sglang/srt/sampling/penaltylib/orchestrator.py
sglang/srt/sampling/penaltylib/presence_penalty.py
sglang/srt/speculative/build_eagle_tree.py
sglang/srt/speculative/eagle_draft_cuda_graph_runner.py
sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py
sglang/srt/speculative/eagle_utils.py
sglang/srt/speculative/eagle_worker.py
sglang/srt/speculative/spec_info.py
sglang/srt/tokenizer/tiktoken_tokenizer.py
sglang/srt/weight_sync/tensor_bucket.py
sglang/srt/weight_sync/utils.py
sglang/test/__init__.py
sglang/test/doc_patch.py
sglang/test/few_shot_gsm8k.py
sglang/test/few_shot_gsm8k_engine.py
sglang/test/run_eval.py
sglang/test/runners.py
sglang/test/send_one.py
sglang/test/simple_eval_common.py
sglang/test/simple_eval_gpqa.py
sglang/test/simple_eval_humaneval.py
sglang/test/simple_eval_math.py
sglang/test/simple_eval_mgsm.py
sglang/test/simple_eval_mmlu.py
sglang/test/test_activation.py
sglang/test/test_block_fp8.py
sglang/test/test_block_fp8_deep_gemm_blackwell.py
sglang/test/test_block_fp8_ep.py
sglang/test/test_custom_ops.py
sglang/test/test_cutlass_moe.py
sglang/test/test_cutlass_w4a8_moe.py
sglang/test/test_deepep_utils.py
sglang/test/test_dynamic_grad_mode.py
sglang/test/test_fp4_moe.py
sglang/test/test_layernorm.py
sglang/test/test_marlin_moe.py
sglang/test/test_marlin_utils.py
sglang/test/test_programs.py
sglang/test/test_utils.py
sglang/test/attention/__init__.py
sglang/test/attention/test_flashattn_backend.py
sglang/test/attention/test_flashattn_mla_backend.py
sglang/test/attention/test_prefix_chunk_info.py
sglang/test/attention/test_trtllm_mla_backend.py
aiohttp
requests
tqdm
numpy
IPython
setproctitle
[all]
sglang[srt]
sglang[openai]
sglang[anthropic]
sglang[torch_memory_saver]
sglang[decord]
[all_cpu]
sglang[srt_cpu]
sglang[openai]
sglang[anthropic]
sglang[decord]
[all_hip]
sglang[srt_hip]
sglang[openai]
sglang[anthropic]
sglang[decord]
[all_hpu]
sglang[srt_hpu]
sglang[openai]
sglang[anthropic]
sglang[decord]
[all_npu]
sglang[srt_npu]
sglang[openai]
sglang[anthropic]
sglang[decord]
[all_xpu]
sglang[srt_xpu]
sglang[openai]
sglang[anthropic]
sglang[decord]
[anthropic]
anthropic>=0.20.0
[blackwell]
sglang[runtime_common]
sgl-kernel
torch==2.8.0
torchaudio==2.8.0
torchvision
cuda-python
flashinfer_python==0.3.0
[decord]
decord
[dev]
sglang[all]
sglang[test]
[dev_cpu]
sglang[all_cpu]
sglang[test]
[dev_hip]
sglang[all_hip]
sglang[test]
[dev_hpu]
sglang[all_hpu]
sglang[test]
[dev_xpu]
sglang[all_xpu]
sglang[test]
[litellm]
litellm>=1.0.0
[openai]
openai==1.99.1
tiktoken
[runtime_common]
blobfile==3.0.0
build
compressed-tensors
datasets
einops
fastapi
hf_transfer
huggingface_hub
interegular
llguidance<0.8.0,>=0.7.11
modelscope
msgspec
ninja
openai==1.99.1
openai-harmony==0.0.4
orjson
outlines==0.1.11
packaging
partial_json_parser
pillow
prometheus-client>=0.20.0
psutil
pybase64
pydantic
pynvml
python-multipart
pyzmq>=25.1.2
sentencepiece
soundfile==0.13.1
scipy
timm==1.0.16
tiktoken
torchao==0.9.0
transformers==4.56.0
uvicorn
uvloop
xgrammar==0.1.23
[srt]
sglang[runtime_common]
sgl-kernel==0.3.8
torch==2.8.0
torchaudio==2.8.0
torchvision
cuda-python
flashinfer_python==0.3.0
[srt_cpu]
sglang[runtime_common]
[srt_hip]
sglang[runtime_common]
torch
petit_kernel==0.0.2
wave-lang==1.0.1
[srt_hpu]
sglang[runtime_common]
[srt_npu]
sglang[runtime_common]
[srt_xpu]
sglang[runtime_common]
[test]
accelerate
expecttest
jsonlines
matplotlib
pandas
peft
sentence_transformers
pytest
tabulate
[torch_memory_saver]
torch_memory_saver==0.0.8
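
The bracketed headings above are setuptools extras groups from requires.txt; aggregate groups such as [all] expand to other extras (sglang[srt], sglang[openai], sglang[anthropic], ...), while the srt_* groups select a platform-specific runtime stack. A minimal install sketch, assuming the package is installed from this source tree (the exact wheel or index source is an assumption, not specified by this commit):

# aggregate extra: pulls in the default CUDA runtime stack (srt, openai, anthropic, ...)
# pip install "sglang[all]"
# platform-specific runtime only, e.g. ROCm/HIP
# pip install "sglang[srt_hip]"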