Unverified Commit 4a102a2b authored by Lianmin Zheng's avatar Lianmin Zheng Committed by GitHub
Browse files

Minor style fix in cuda_graph_runner.py (#7053)

parent 6406408a
...@@ -152,10 +152,11 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner): ...@@ -152,10 +152,11 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner):
) )
gpu_mem = get_device_memory_capacity() gpu_mem = get_device_memory_capacity()
if gpu_mem is not None and gpu_mem > 96 * 1024: if gpu_mem is not None:
capture_bs += list(range(160, 257, 8)) if gpu_mem > 90 * 1024: # H200
if gpu_mem is not None and gpu_mem > 180 * 1000: capture_bs += list(range(160, 257, 8))
capture_bs += list(range(256, 513, 16)) if gpu_mem > 160 * 1000: # B200, MI300
capture_bs += list(range(256, 513, 16))
if max(capture_bs) > model_runner.req_to_token_pool.size: if max(capture_bs) > model_runner.req_to_token_pool.size:
# In some cases (e.g., with a small GPU or --max-running-requests), the #max-running-requests # In some cases (e.g., with a small GPU or --max-running-requests), the #max-running-requests
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment