Unverified Commit b8559764 authored by Huapeng Zhou, committed by GitHub

[Test] Add flashmla attention backend test (#5587)

parent 56f6589e
@@ -31,3 +31,6 @@ pip install cuda-python nvidia-cuda-nvrtc-cu12
# For lmms_evals evaluating MMMU
git clone --branch v0.3.3 --depth 1 https://github.com/EvolvingLMMs-Lab/lmms-eval.git
pip install -e lmms-eval/
# Install FlashMLA for attention backend tests
pip install git+https://github.com/deepseek-ai/FlashMLA.git
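
A quick way to confirm the install succeeded before running the new suite entry is an import check. This is a minimal sketch, assuming the package exposes a top-level `flash_mla` module as in the upstream repository:

# Assumed module name `flash_mla`; exits non-zero if the install failed
python3 -c "import flash_mla"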
@@ -51,6 +51,7 @@ suites = {
        TestFile("test_mla_int8_deepseek_v3.py", 389),
        TestFile("test_mla_flashinfer.py", 395),
        TestFile("test_mla_fp8.py", 153),
        TestFile("test_flash_mla_attention_backend.py", 300),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 216),
        TestFile("test_openai_server.py", 149),
"""
Usage:
python3 -m unittest test_flash_mla_attention_backend.TestFlashMLAAttnBackend.test_mmlu
"""
import unittest
from types import SimpleNamespace
from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
DEFAULT_MLA_MODEL_NAME_FOR_TEST,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
is_in_ci,
popen_launch_server,
run_bench_one_batch,
)


class TestFlashMLAAttnBackend(unittest.TestCase):
def test_latency(self):
output_throughput = run_bench_one_batch(
DEFAULT_MLA_MODEL_NAME_FOR_TEST,
[
"--attention-backend",
"flashmla",
"--enable-torch-compile",
"--cuda-graph-max-bs",
"16",
"--trust-remote-code",
],
)
if is_in_ci():
            self.assertGreater(output_throughput, 153)

    def test_mmlu(self):
model = DEFAULT_MLA_MODEL_NAME_FOR_TEST
base_url = DEFAULT_URL_FOR_TEST
process = popen_launch_server(
model,
base_url,
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
other_args=["--attention-backend", "flashmla", "--trust-remote-code"],
)
try:
args = SimpleNamespace(
base_url=base_url,
model=model,
eval_name="mmlu",
num_examples=64,
num_threads=32,
)
metrics = run_eval(args)
self.assertGreaterEqual(metrics["score"], 0.2)
finally:
            kill_process_tree(process.pid)


if __name__ == "__main__":
unittest.main()
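
For reference, the backend selection exercised by these tests can also be used when launching a server by hand. The command below is a sketch that mirrors the flags passed in test_mmlu; the model path is a placeholder and not part of this change:

# <model-path> is a placeholder; flags mirror the other_args used in test_mmlu
python3 -m sglang.launch_server --model-path <model-path> --attention-backend flashmla --trust-remote-code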