Unverified Commit 932e2637 authored by Yuwei An, committed by GitHub

Compilation Folder Reset (#11539)

Moves the compilation package from sglang.srt.model_executor.compilation to sglang.srt.compilation and updates every import accordingly. Also drops a commented-out debug print in GroupCoordinator and switches the piecewise attention output buffer from torch.zeros_like to torch.empty_like.

Signed-off-by: Oasis-Git <ayw.sirius19@gmail.com>
parent 43f80884
@@ -15,15 +15,11 @@ import torch
 import torch.fx as fx
 from torch._dispatch.python import enable_python_dispatcher
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
-from sglang.srt.model_executor.compilation.compiler_interface import InductorAdaptor
-from sglang.srt.model_executor.compilation.cuda_piecewise_backend import (
-    CUDAPiecewiseBackend,
-)
-from sglang.srt.model_executor.compilation.pass_manager import PostGradPassManager
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_counter import compilation_counter
+from sglang.srt.compilation.compiler_interface import InductorAdaptor
+from sglang.srt.compilation.cuda_piecewise_backend import CUDAPiecewiseBackend
+from sglang.srt.compilation.pass_manager import PostGradPassManager
 logger = logging.getLogger(__name__)
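The substantive change throughout this commit is the package move from sglang.srt.model_executor.compilation to sglang.srt.compilation. Downstream code that has to run on both sides of the move can guard the import; a minimal shim, shown here for CompilationConfig only as a sketch (the same pattern would apply to the other moved modules; upstream itself uses the new path exclusively):

    try:
        from sglang.srt.compilation.compilation_config import CompilationConfig
    except ImportError:
        # Fall back to the pre-#11539 package layout.
        from sglang.srt.model_executor.compilation.compilation_config import (
            CompilationConfig,
        )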
@@ -10,7 +10,7 @@ from typing import Any, Callable, Optional, Union
 import torch
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_config import CompilationConfig
 logger = logging.getLogger(__name__)
@@ -134,7 +134,7 @@ def install_torch_compiled(
     dyn_map = dynamic_arg_dims or _infer_dynamic_arg_dims_from_annotations(unbound_fwd)
     if backend_factory is None:
-        from sglang.srt.model_executor.compilation.backend import SGLangBackend
+        from sglang.srt.compilation.backend import SGLangBackend
         backend_factory = lambda gm, ex: SGLangBackend(compile_config, graph_pool)(
             gm, ex
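For context on the backend_factory line above: SGLangBackend is invoked through the standard torch.compile custom-backend contract, i.e. a callable that receives an fx.GraphModule plus its example inputs and returns a compiled callable. A minimal sketch of that contract (inspecting_backend is hypothetical; only the torch.compile API itself is real):

    import torch
    from typing import Callable, List

    def inspecting_backend(
        gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]
    ) -> Callable:
        # torch.compile hands each captured FX graph to the backend along
        # with example inputs; returning any callable satisfies the contract.
        print(gm.graph)
        return gm.forward  # run the captured graph without further compilation

    compiled = torch.compile(torch.sin, backend=inspecting_backend)
    compiled(torch.randn(4))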
@@ -12,10 +12,8 @@ import torch
 import torch._inductor.compile_fx
 import torch.fx as fx
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
-from sglang.srt.model_executor.compilation.inductor_pass import pass_context
+from sglang.srt.compilation.compilation_counter import compilation_counter
+from sglang.srt.compilation.inductor_pass import pass_context
 class CompilerInterface:
@@ -9,11 +9,9 @@ from unittest.mock import patch
 import torch
 import torch.fx as fx
-import sglang.srt.model_executor.compilation.weak_ref_tensor_jit
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
+import sglang.srt.compilation.weak_ref_tensor_jit
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_counter import compilation_counter
 logger = logging.getLogger(__name__)
@@ -8,8 +8,8 @@ from typing import Optional, Union
 import torch
 from torch._higher_order_ops.auto_functionalize import auto_functionalized
-from sglang.srt.model_executor.compilation.fx_utils import is_func
-from sglang.srt.model_executor.compilation.inductor_pass import SGLangInductorPass
+from sglang.srt.compilation.fx_utils import is_func
+from sglang.srt.compilation.inductor_pass import SGLangInductorPass
 logger = logging.getLogger(__name__)
@@ -4,10 +4,8 @@ import logging
 from torch import fx as fx
-from sglang.srt.model_executor.compilation.fix_functionalization import (
-    FixFunctionalizationPass,
-)
-from sglang.srt.model_executor.compilation.inductor_pass import (
+from sglang.srt.compilation.fix_functionalization import FixFunctionalizationPass
+from sglang.srt.compilation.inductor_pass import (
     CustomGraphPass,
     InductorPass,
     SGLangInductorPass,
@@ -337,7 +337,6 @@ class GroupCoordinator:
         else:
             ca_max_size = 8 * 1024 * 1024
         try:
-            # print(f"ca_max_size: {ca_max_size}")
             self.ca_comm = CustomAllreduce(
                 group=self.cpu_group,
                 device=self.device,
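The deleted line was a leftover debug print. If the buffer size ever needs to be surfaced again, a logger call would be the idiomatic route; a sketch, not part of this commit (the value mirrors the else branch above):

    import logging

    logger = logging.getLogger(__name__)
    ca_max_size = 8 * 1024 * 1024
    # Emitted only when debug logging is enabled, instead of always printing.
    logger.debug("ca_max_size: %s", ca_max_size)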
@@ -20,15 +20,13 @@ from typing import TYPE_CHECKING, Optional
 import torch
 from torch import nn
+from sglang.srt.compilation.piecewise_context_manager import get_forward_context
+from sglang.srt.utils import direct_register_custom_op
 if TYPE_CHECKING:
     from sglang.srt.layers.quantization.base_config import QuantizationConfig
     from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.model_executor.compilation.piecewise_context_manager import (
-    get_forward_context,
-)
-from sglang.srt.utils import direct_register_custom_op
 class AttentionType(Enum):
     """
@@ -112,7 +110,7 @@ class RadixAttention(nn.Module):
         k = k.view(-1, self.tp_k_head_num, self.v_head_dim)
         if forward_batch.forward_mode.is_extend() and get_forward_context() is not None:
-            output = torch.zeros_like(q)
+            output = torch.empty_like(q)
             torch.ops.sglang.unified_attention_with_output(
                 q, k, v, output, save_kv_cache, self.layer_id
             )
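The zeros_like to empty_like switch is safe because unified_attention_with_output is used as an out-variant op here: it overwrites every element of output, so zero-filling the buffer first is pure overhead. A self-contained illustration of the pattern (write_out is a stand-in for the real op):

    import torch

    def write_out(q: torch.Tensor, out: torch.Tensor) -> None:
        # Stand-in for an out-variant kernel: writes all of `out`,
        # reads none of it beforehand.
        torch.mul(q, 2.0, out=out)

    q = torch.randn(4, 8)
    out = torch.empty_like(q)  # uninitialized allocation; cheaper than zeros_like
    write_out(q, out)
    assert torch.equal(out, q * 2.0)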
@@ -24,6 +24,9 @@ from typing import TYPE_CHECKING, Union
 import torch
 import tqdm
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compile import install_torch_compiled, set_compiled
+from sglang.srt.compilation.piecewise_context_manager import set_forward_context
 from sglang.srt.custom_op import CustomOp
 from sglang.srt.distributed import get_tensor_model_parallel_rank
 from sglang.srt.distributed.device_communicators.pynccl_allocator import (
@@ -38,14 +41,6 @@ from sglang.srt.layers.dp_attention import (
 )
 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
 from sglang.srt.layers.torchao_utils import save_gemlite_cache
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compile import (
-    install_torch_compiled,
-    set_compiled,
-)
-from sglang.srt.model_executor.compilation.piecewise_context_manager import (
-    set_forward_context,
-)
 from sglang.srt.model_executor.forward_batch_info import (
     CaptureHiddenMode,
     ForwardBatch,