"docs/zh_cn/vscode:/vscode.git/clone" did not exist on "fe851fbc27e4aebbbf1bd39b8538fc8807504bc9"
Unverified commit 2f79f588 authored by Yineng Zhang, committed by GitHub

feat: use sgl-kernel 0.0.3 in sglang (#3179)

parent 8a96f749
@@ -27,7 +27,7 @@ runtime_common = [
 ]
 srt = [
     "sglang[runtime_common]", "cuda-python",
-    "sgl-kernel>=0.0.2.post18", "torch", "vllm==0.6.4.post1",
+    "sgl-kernel>=0.0.3", "torch", "vllm==0.6.4.post1",
     "flashinfer==0.1.6"
 ]
......
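The only dependency change is the sgl-kernel floor moving from 0.0.2.post18 to 0.0.3. A hedged sketch (not part of this commit) for checking that a local environment satisfies the new pin; it assumes the `packaging` package is installed alongside sglang:

```python
# Verify the installed sgl-kernel wheel against the new ">=0.0.3" pin above.
from importlib.metadata import version
from packaging.version import Version

installed = Version(version("sgl-kernel"))
assert installed >= Version("0.0.3"), f"sgl-kernel {installed} < 0.0.3; upgrade it"
print(f"sgl-kernel {installed} satisfies the >=0.0.3 requirement")
```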
@@ -20,10 +20,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from sglang.srt.utils import is_flashinfer_available
+from sglang.srt.utils import is_cuda_available

-if is_flashinfer_available():
-    from flashinfer.activation import gelu_and_mul, gelu_tanh_and_mul, silu_and_mul
+if is_cuda_available():
+    from sgl_kernel import gelu_and_mul, gelu_tanh_and_mul, silu_and_mul

 from vllm.model_executor.custom_op import CustomOp
@@ -149,8 +149,8 @@ def get_act_fn(
     return act_fn


-if not is_flashinfer_available():
+if not is_cuda_available():
     logger.info(
-        "FlashInfer is not available on Non-NV platforms. Fallback to other kernel libraries."
+        "sgl-kernel is not available on Non-NV platforms. Fallback to other kernel libraries."
     )
     from vllm.model_executor.layers.activation import GeluAndMul, SiluAndMul
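The two activation hunks swap the fused activation kernels from flashinfer.activation to sgl_kernel while keeping vLLM's ops as the non-CUDA fallback. A minimal sketch of that pattern, assuming sgl_kernel's silu_and_mul keeps the flashinfer-style (input, out) calling convention it replaces:

```python
import torch
import torch.nn.functional as F

from sglang.srt.utils import is_cuda_available


def silu_and_mul_fused(x: torch.Tensor) -> torch.Tensor:
    """SiLU(x[..., :d]) * x[..., d:], using the fused CUDA kernel when present."""
    d = x.shape[-1] // 2
    if is_cuda_available() and x.is_cuda:
        from sgl_kernel import silu_and_mul

        out = torch.empty(x.shape[:-1] + (d,), dtype=x.dtype, device=x.device)
        silu_and_mul(x, out)  # assumed flashinfer-style: kernel writes into `out`
        return out
    gate, up = x.chunk(2, dim=-1)  # pure-PyTorch fallback, same math
    return F.silu(gate) * up
```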
@@ -19,10 +19,10 @@ from typing import Optional, Tuple, Union
 import torch
 import torch.nn as nn

-from sglang.srt.utils import is_flashinfer_available
+from sglang.srt.utils import is_cuda_available

-if is_flashinfer_available():
-    from flashinfer.norm import (
+if is_cuda_available():
+    from sgl_kernel import (
         fused_add_rmsnorm,
         gemma_fused_add_rmsnorm,
         gemma_rmsnorm,
@@ -121,8 +121,8 @@ class GemmaRMSNorm(CustomOp):
         return out


-if not is_flashinfer_available():
+if not is_cuda_available():
     logger.info(
-        "FlashInfer is not available on Non-NV platforms. Fallback to other kernel libraries."
+        "sgl-kernel is not available on Non-NV platforms. Fallback to other kernel libraries."
     )
     from vllm.model_executor.layers.layernorm import GemmaRMSNorm, RMSNorm
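The layernorm hunks apply the same switch to the RMSNorm kernels. A sketch of how the fused residual-add + RMSNorm path is typically driven, assuming sgl_kernel's fused_add_rmsnorm keeps the in-place (hidden, residual, weight, eps) convention of the flashinfer kernel it replaces:

```python
import torch

from sglang.srt.utils import is_cuda_available


def add_rmsnorm(hidden: torch.Tensor, residual: torch.Tensor,
                weight: torch.Tensor, eps: float = 1e-6):
    if is_cuda_available() and hidden.is_cuda:
        from sgl_kernel import fused_add_rmsnorm

        # Assumed in-place semantics: residual <- hidden + residual,
        # then hidden <- rmsnorm(residual) * weight.
        fused_add_rmsnorm(hidden, residual, weight, eps)
        return hidden, residual
    # Reference fallback with the same semantics.
    residual = hidden + residual
    var = residual.float().pow(2).mean(-1, keepdim=True)
    hidden = (residual.float() * torch.rsqrt(var + eps)).to(residual.dtype) * weight
    return hidden, residual
```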
@@ -10,14 +10,10 @@ from sglang.srt.layers.dp_attention import get_attention_tp_group
 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
-from sglang.srt.utils import (
-    crash_on_warnings,
-    get_bool_env_var,
-    is_flashinfer_available,
-)
+from sglang.srt.utils import crash_on_warnings, get_bool_env_var, is_cuda_available

-if is_flashinfer_available():
-    from flashinfer.sampling import (
+if is_cuda_available():
+    from sgl_kernel import (
         min_p_sampling_from_probs,
         top_k_renorm_prob,
         top_k_top_p_sampling_from_probs,
......
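The sampler hunk collapses the sglang.srt.utils import onto one line and re-points the sampling kernels at sgl_kernel. A hedged sketch of the batched top-k/top-p call, assuming the sgl_kernel function keeps the flashinfer-style (probs, uniform_samples, top_ks, top_ps) -> (token_ids, success) signature it replaces; shapes and dtypes below are illustrative only:

```python
import torch

from sgl_kernel import top_k_top_p_sampling_from_probs

batch_size, vocab_size, max_rounds = 4, 32000, 32
probs = torch.softmax(torch.randn(batch_size, vocab_size, device="cuda"), dim=-1)
uniform_samples = torch.rand(max_rounds, batch_size, device="cuda")  # rejection-sampling draws
top_ks = torch.full((batch_size,), 50, dtype=torch.int32, device="cuda")
top_ps = torch.full((batch_size,), 0.9, device="cuda")

next_token_ids, success = top_k_top_p_sampling_from_probs(
    probs, uniform_samples, top_ks, top_ps
)
print(next_token_ids.shape, success.all().item())
```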
@@ -56,12 +56,12 @@ from sglang.srt.layers.vocab_parallel_embedding import (
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_loader.weight_utils import default_weight_loader
-from sglang.srt.utils import is_flashinfer_available, is_hip
+from sglang.srt.utils import is_cuda_available, is_hip

 is_hip_ = is_hip()

-if is_flashinfer_available():
-    from flashinfer import bmm_fp8
+if is_cuda_available():
+    from sgl_kernel import bmm_fp8


 class DeepseekV2MLP(nn.Module):
......
@@ -40,10 +40,10 @@ from sglang.srt.layers.vocab_parallel_embedding import (
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_loader.weight_utils import default_weight_loader
-from sglang.srt.utils import is_flashinfer_available
+from sglang.srt.utils import is_cuda_available

-if is_flashinfer_available():
-    from flashinfer import bmm_fp8
+if is_cuda_available():
+    from sgl_kernel import bmm_fp8


 class MiniCPM3MLP(nn.Module):
......
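The DeepseekV2 and MiniCPM3 hunks above both re-point the fp8 batched matmul at sgl_kernel. A hedged sketch of the call, assuming bmm_fp8 keeps the flashinfer-style (A, B, A_scale, B_scale, out_dtype) signature with fp8-e4m3 inputs and a column-major B per batch; the shapes and unit scales are illustrative only:

```python
import torch

from sgl_kernel import bmm_fp8

b, m, k, n = 8, 16, 128, 64
a = torch.randn(b, m, k, device="cuda").to(torch.float8_e4m3fn)
# Column-major B per batch: build (b, n, k) and view it as (b, k, n) via transpose.
w = torch.randn(b, n, k, device="cuda").to(torch.float8_e4m3fn).transpose(-2, -1)
a_scale = torch.tensor(1.0, device="cuda")
w_scale = torch.tensor(1.0, device="cuda")

out = bmm_fp8(a, w, a_scale, w_scale, torch.bfloat16)  # (b, m, n) in bfloat16
print(out.shape, out.dtype)
```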