Unverified commit ed45e509 authored by Ata Fatahi, committed by GitHub
Browse files

Check gpu availability at server args creation (#2340)


Signed-off-by: Ata Fatahi <immrata@gmail.com>
parent ec52464d
...@@ -20,6 +20,8 @@ import random ...@@ -20,6 +20,8 @@ import random
import tempfile import tempfile
from typing import List, Optional from typing import List, Optional
import torch
from sglang.srt.hf_transformers_utils import check_gguf_file from sglang.srt.hf_transformers_utils import check_gguf_file
from sglang.srt.utils import ( from sglang.srt.utils import (
get_amdgpu_memory_capacity, get_amdgpu_memory_capacity,
...@@ -151,8 +153,11 @@ class ServerArgs: ...@@ -151,8 +153,11 @@ class ServerArgs:
if is_hip(): if is_hip():
gpu_mem = get_amdgpu_memory_capacity() gpu_mem = get_amdgpu_memory_capacity()
else: elif torch.cuda.is_available():
gpu_mem = get_nvgpu_memory_capacity() gpu_mem = get_nvgpu_memory_capacity()
else:
# GPU memory is not known yet or no GPU is available.
gpu_mem = None
# Set mem fraction static, which depends on the tensor parallelism size # Set mem fraction static, which depends on the tensor parallelism size
if self.mem_fraction_static is None: if self.mem_fraction_static is None:
...@@ -169,14 +174,14 @@ class ServerArgs: ...@@ -169,14 +174,14 @@ class ServerArgs:
# Set chunked prefill size, which depends on the gpu memory capacity # Set chunked prefill size, which depends on the gpu memory capacity
if self.chunked_prefill_size is None: if self.chunked_prefill_size is None:
if gpu_mem < 25_000: if gpu_mem is not None and gpu_mem < 25_000:
self.chunked_prefill_size = 2048 self.chunked_prefill_size = 2048
else: else:
self.chunked_prefill_size = 8192 self.chunked_prefill_size = 8192
# Set cuda graph max batch size # Set cuda graph max batch size
if self.cuda_graph_max_bs is None: if self.cuda_graph_max_bs is None:
if gpu_mem < 25_000: if gpu_mem is not None and gpu_mem < 25_000:
self.cuda_graph_max_bs = 8 self.cuda_graph_max_bs = 8
else: else:
self.cuda_graph_max_bs = 160 self.cuda_graph_max_bs = 160
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.