Unverified Commit ccaf1f99 authored by Lianmin Zheng's avatar Lianmin Zheng Committed by GitHub
Browse files

[CI] Print summary on github actions (#2274)

parent 7d1485d3
...@@ -816,3 +816,8 @@ def run_mulit_request_test( ...@@ -816,3 +816,8 @@ def run_mulit_request_test(
chunked_prefill_size, chunked_prefill_size,
assert_has_abort=False, assert_has_abort=False,
) )
def write_github_step_summary(content):
    """Append ``content`` (Markdown) to the GitHub Actions step summary.

    GitHub Actions exposes the path of the per-step summary file through
    the ``GITHUB_STEP_SUMMARY`` environment variable; text appended to
    that file is rendered as Markdown on the workflow run page.

    Args:
        content: Markdown text to append to the summary.

    Raises:
        KeyError: if ``GITHUB_STEP_SUMMARY`` is not set, i.e. the code is
            not running inside GitHub Actions. Callers guard this with
            ``is_in_ci()``.
    """
    # Append mode so repeated calls within one step accumulate output.
    # Explicit UTF-8 keeps non-ASCII Markdown stable regardless of the
    # runner's locale default encoding.
    with open(os.environ["GITHUB_STEP_SUMMARY"], "a", encoding="utf-8") as f:
        f.write(content)
...@@ -6,6 +6,7 @@ from sglang.test.test_utils import ( ...@@ -6,6 +6,7 @@ from sglang.test.test_utils import (
DEFAULT_MOE_MODEL_NAME_FOR_TEST, DEFAULT_MOE_MODEL_NAME_FOR_TEST,
is_in_ci, is_in_ci,
run_bench_serving, run_bench_serving,
write_github_step_summary,
) )
...@@ -20,6 +21,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -20,6 +21,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_default\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 3350) self.assertGreater(res["output_throughput"], 3350)
def test_offline_throughput_non_stream_small_batch_size(self): def test_offline_throughput_non_stream_small_batch_size(self):
...@@ -36,6 +41,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -36,6 +41,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_non_stream_small_batch_size\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
# There is a regression with torch 2.5 # There is a regression with torch 2.5
# This number was 950 for torch 2.4 # This number was 950 for torch 2.4
self.assertGreater(res["output_throughput"], 800) self.assertGreater(res["output_throughput"], 800)
...@@ -49,6 +58,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -49,6 +58,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_without_radix_cache\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 3350) self.assertGreater(res["output_throughput"], 3350)
def test_offline_throughput_without_chunked_prefill(self): def test_offline_throughput_without_chunked_prefill(self):
...@@ -60,6 +73,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -60,6 +73,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_without_chunked_prefill\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 2600) self.assertGreater(res["output_throughput"], 2600)
def test_offline_throughput_with_triton_attention_backend(self): def test_offline_throughput_with_triton_attention_backend(self):
...@@ -76,6 +93,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -76,6 +93,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_with_triton_attention_backend\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 3450) self.assertGreater(res["output_throughput"], 3450)
def test_offline_throughput_default_fp8(self): def test_offline_throughput_default_fp8(self):
...@@ -87,6 +108,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -87,6 +108,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_offline_throughput_default_fp8\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 3850) self.assertGreater(res["output_throughput"], 3850)
def test_online_latency_default(self): def test_online_latency_default(self):
...@@ -98,6 +123,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -98,6 +123,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_online_latency_default\n"
f'median_e2e_latency_ms : {res["median_e2e_latency_ms"]:.2f} token/s\n'
)
self.assertLess(res["median_e2e_latency_ms"], 12000) self.assertLess(res["median_e2e_latency_ms"], 12000)
self.assertLess(res["median_ttft_ms"], 86) self.assertLess(res["median_ttft_ms"], 86)
self.assertLess(res["median_itl_ms"], 10) self.assertLess(res["median_itl_ms"], 10)
...@@ -111,6 +140,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -111,6 +140,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_moe_offline_throughput_default\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 2150) self.assertGreater(res["output_throughput"], 2150)
def test_moe_offline_throughput_without_radix_cache(self): def test_moe_offline_throughput_without_radix_cache(self):
...@@ -122,6 +155,10 @@ class TestBenchServing(unittest.TestCase): ...@@ -122,6 +155,10 @@ class TestBenchServing(unittest.TestCase):
) )
if is_in_ci(): if is_in_ci():
write_github_step_summary(
f"### test_moe_offline_throughput_without_radix_cache\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
self.assertGreater(res["output_throughput"], 2150) self.assertGreater(res["output_throughput"], 2150)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment