Unverified Commit 4606e2a3 authored by who who who's avatar who who who Committed by GitHub
Browse files

Bug: fix capture_bs (#3857)

parent 127998cc
...@@ -114,6 +114,10 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner): ...@@ -114,6 +114,10 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner):
capture_bs = list(range(1, 33)) + [64, 128] capture_bs = list(range(1, 33)) + [64, 128]
else: else:
capture_bs = [1, 2, 4] + [i * 8 for i in range(1, 21)] capture_bs = [1, 2, 4] + [i * 8 for i in range(1, 21)]
if is_hip_:
capture_bs += [i * 8 for i in range(21, 33)]
if max(capture_bs) > model_runner.req_to_token_pool.size: if max(capture_bs) > model_runner.req_to_token_pool.size:
# In some case (e.g., with a small GPU or --max-running-requests), the #max-running-requests # In some case (e.g., with a small GPU or --max-running-requests), the #max-running-requests
# is very small. We add more values here to make sure we capture the maximum bs. # is very small. We add more values here to make sure we capture the maximum bs.
...@@ -132,8 +136,6 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner): ...@@ -132,8 +136,6 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner):
if bs <= model_runner.req_to_token_pool.size if bs <= model_runner.req_to_token_pool.size
and bs <= server_args.cuda_graph_max_bs and bs <= server_args.cuda_graph_max_bs
] ]
if is_hip_:
capture_bs += [i * 8 for i in range(21, 33)]
compile_bs = ( compile_bs = (
[bs for bs in capture_bs if bs <= server_args.torch_compile_max_bs] [bs for bs in capture_bs if bs <= server_args.torch_compile_max_bs]
if server_args.enable_torch_compile if server_args.enable_torch_compile
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment