Unverified commit 932e2637, authored by Yuwei An and committed by GitHub

Compilation Folder Reset (#11539)


Signed-off-by: Oasis-Git <ayw.sirius19@gmail.com>
parent 43f80884
@@ -15,15 +15,11 @@ import torch
 import torch.fx as fx
 from torch._dispatch.python import enable_python_dispatcher
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
-from sglang.srt.model_executor.compilation.compiler_interface import InductorAdaptor
-from sglang.srt.model_executor.compilation.cuda_piecewise_backend import (
-    CUDAPiecewiseBackend,
-)
-from sglang.srt.model_executor.compilation.pass_manager import PostGradPassManager
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_counter import compilation_counter
+from sglang.srt.compilation.compiler_interface import InductorAdaptor
+from sglang.srt.compilation.cuda_piecewise_backend import CUDAPiecewiseBackend
+from sglang.srt.compilation.pass_manager import PostGradPassManager
 logger = logging.getLogger(__name__)
@@ -10,7 +10,7 @@ from typing import Any, Callable, Optional, Union
 import torch
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_config import CompilationConfig
 logger = logging.getLogger(__name__)
@@ -134,7 +134,7 @@ def install_torch_compiled(
     dyn_map = dynamic_arg_dims or _infer_dynamic_arg_dims_from_annotations(unbound_fwd)
     if backend_factory is None:
-        from sglang.srt.model_executor.compilation.backend import SGLangBackend
+        from sglang.srt.compilation.backend import SGLangBackend
         backend_factory = lambda gm, ex: SGLangBackend(compile_config, graph_pool)(
             gm, ex
@@ -12,10 +12,8 @@ import torch
 import torch._inductor.compile_fx
 import torch.fx as fx
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
-from sglang.srt.model_executor.compilation.inductor_pass import pass_context
+from sglang.srt.compilation.compilation_counter import compilation_counter
+from sglang.srt.compilation.inductor_pass import pass_context
 class CompilerInterface:
@@ -9,11 +9,9 @@ from unittest.mock import patch
 import torch
 import torch.fx as fx
-import sglang.srt.model_executor.compilation.weak_ref_tensor_jit
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compilation_counter import (
-    compilation_counter,
-)
+import sglang.srt.compilation.weak_ref_tensor_jit
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compilation_counter import compilation_counter
 logger = logging.getLogger(__name__)
@@ -8,8 +8,8 @@ from typing import Optional, Union
 import torch
 from torch._higher_order_ops.auto_functionalize import auto_functionalized
-from sglang.srt.model_executor.compilation.fx_utils import is_func
-from sglang.srt.model_executor.compilation.inductor_pass import SGLangInductorPass
+from sglang.srt.compilation.fx_utils import is_func
+from sglang.srt.compilation.inductor_pass import SGLangInductorPass
 logger = logging.getLogger(__name__)
@@ -4,10 +4,8 @@ import logging
 from torch import fx as fx
-from sglang.srt.model_executor.compilation.fix_functionalization import (
-    FixFunctionalizationPass,
-)
-from sglang.srt.model_executor.compilation.inductor_pass import (
+from sglang.srt.compilation.fix_functionalization import FixFunctionalizationPass
+from sglang.srt.compilation.inductor_pass import (
     CustomGraphPass,
     InductorPass,
     SGLangInductorPass,
@@ -337,7 +337,6 @@ class GroupCoordinator:
         else:
             ca_max_size = 8 * 1024 * 1024
         try:
-            # print(f"ca_max_size: {ca_max_size}")
             self.ca_comm = CustomAllreduce(
                 group=self.cpu_group,
                 device=self.device,
@@ -20,15 +20,13 @@ from typing import TYPE_CHECKING, Optional
 import torch
 from torch import nn
+from sglang.srt.compilation.piecewise_context_manager import get_forward_context
+from sglang.srt.utils import direct_register_custom_op
 if TYPE_CHECKING:
     from sglang.srt.layers.quantization.base_config import QuantizationConfig
     from sglang.srt.model_executor.forward_batch_info import ForwardBatch
-from sglang.srt.model_executor.compilation.piecewise_context_manager import (
-    get_forward_context,
-)
-from sglang.srt.utils import direct_register_custom_op
 class AttentionType(Enum):
     """
@@ -112,7 +110,7 @@ class RadixAttention(nn.Module):
         k = k.view(-1, self.tp_k_head_num, self.v_head_dim)
         if forward_batch.forward_mode.is_extend() and get_forward_context() is not None:
-            output = torch.zeros_like(q)
+            output = torch.empty_like(q)
             torch.ops.sglang.unified_attention_with_output(
                 q, k, v, output, save_kv_cache, self.layer_id
             )
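Note on the RadixAttention hunk above: the output buffer switches from torch.zeros_like to torch.empty_like, presumably because unified_attention_with_output writes every element of the buffer, making the zero-fill redundant. A minimal standalone sketch of the difference (not the commit's code; the fully-overwritten-buffer rationale is my assumption):

import torch

q = torch.randn(4, 8)

# zeros_like allocates *and* zero-fills; empty_like only allocates.
# If the kernel later writes every element of `output`, the zero-fill
# is wasted work, so empty_like is the cheaper choice.
out_zeroed = torch.zeros_like(q)
out_uninit = torch.empty_like(q)  # contents are arbitrary until written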
@@ -24,6 +24,9 @@ from typing import TYPE_CHECKING, Union
 import torch
 import tqdm
+from sglang.srt.compilation.compilation_config import CompilationConfig
+from sglang.srt.compilation.compile import install_torch_compiled, set_compiled
+from sglang.srt.compilation.piecewise_context_manager import set_forward_context
 from sglang.srt.custom_op import CustomOp
 from sglang.srt.distributed import get_tensor_model_parallel_rank
 from sglang.srt.distributed.device_communicators.pynccl_allocator import (
@@ -38,14 +41,6 @@ from sglang.srt.layers.dp_attention import (
 )
 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
 from sglang.srt.layers.torchao_utils import save_gemlite_cache
-from sglang.srt.model_executor.compilation.compilation_config import CompilationConfig
-from sglang.srt.model_executor.compilation.compile import (
-    install_torch_compiled,
-    set_compiled,
-)
-from sglang.srt.model_executor.compilation.piecewise_context_manager import (
-    set_forward_context,
-)
 from sglang.srt.model_executor.forward_batch_info import (
     CaptureHiddenMode,
     ForwardBatch,
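For downstream code, the practical effect of this commit is that the compilation helpers now live under sglang.srt.compilation rather than sglang.srt.model_executor.compilation. A minimal sketch of the updated imports (paths taken directly from the added lines in the hunks above; nothing else is assumed):

# New locations after the folder reset; the old
# sglang.srt.model_executor.compilation.* paths no longer exist.
from sglang.srt.compilation.compilation_config import CompilationConfig
from sglang.srt.compilation.compile import install_torch_compiled, set_compiled
from sglang.srt.compilation.compiler_interface import InductorAdaptor
from sglang.srt.compilation.cuda_piecewise_backend import CUDAPiecewiseBackend
from sglang.srt.compilation.pass_manager import PostGradPassManager
from sglang.srt.compilation.piecewise_context_manager import (
    get_forward_context,
    set_forward_context,
)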