Unverified Commit 49420741 authored by JieXin Liang's avatar JieXin Liang Committed by GitHub
Browse files

[fix] fix initialization of _ENABLE_TORCH_INFERENCE_MODE (#4549)

parent ba52fd18
......@@ -78,6 +78,11 @@ time_infos = {}
HIP_FP8_E4M3_FNUZ_MAX = 224.0
def get_bool_env_var(name: str, default: str = "false") -> bool:
    """Interpret the environment variable *name* as a boolean flag.

    Falls back to *default* when the variable is unset. Only the
    case-insensitive spellings "true" and "1" count as True; any other
    value (including "yes" or "on") is False.
    """
    truthy = {"true", "1"}
    return os.getenv(name, default).lower() in truthy
# https://pytorch.org/docs/stable/notes/hip.html#checking-for-hip
def is_hip() -> bool:
    """Return True when the installed torch build targets AMD ROCm/HIP.

    Per the PyTorch HIP notes, ``torch.version.hip`` is a version string
    on ROCm builds and ``None`` otherwise.
    """
    hip_version = torch.version.hip
    return hip_version is not None
......@@ -128,9 +133,9 @@ def is_cuda_available():
return is_cuda()
_ENABLE_TORCH_INFERENCE_MODE = os.getenv(
_ENABLE_TORCH_INFERENCE_MODE = get_bool_env_var(
"SGLANG_ENABLE_TORCH_INFERENCE_MODE", "false"
).lower() in ("true", "1")
)
class DynamicGradMode(_DecoratorContextManager):
......@@ -1327,11 +1332,6 @@ def set_gpu_proc_affinity(
logger.info(f"Process {pid} gpu_id {gpu_id} is running on CPUs: {p.cpu_affinity()}")
def get_bool_env_var(name: str, default: str = "false") -> bool:
    """Read environment variable *name* and coerce it to a boolean.

    When the variable is missing, *default* is used instead. The value
    is True exactly when it is "true" or "1" (case-insensitive).
    """
    value = os.getenv(name, default).lower()
    return value == "true" or value == "1"
@lru_cache(maxsize=2)
def disable_request_logging() -> bool:
    """Report whether request logging is disabled via the environment.

    The answer is memoized by ``lru_cache``, so the environment variable
    SGLANG_DISABLE_REQUEST_LOGGING is consulted only on the first call.
    """
    flag = get_bool_env_var("SGLANG_DISABLE_REQUEST_LOGGING")
    return flag
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment