change / sglang, commit b8559764 (unverified)
Authored by Huapeng Zhou on May 06, 2025; committed by GitHub on May 05, 2025
[Test] Add flashmla attention backend test (#5587)
Parent: 56f6589e

Showing 3 changed files with 68 additions and 0 deletions (+68, -0)
scripts/ci_install_dependency.sh                +3   -0
test/srt/run_suite.py                           +1   -0
test/srt/test_flash_mla_attention_backend.py    +64  -0
scripts/ci_install_dependency.sh

@@ -31,3 +31,6 @@ pip install cuda-python nvidia-cuda-nvrtc-cu12
 # For lmms_evals evaluating MMMU
 git clone --branch v0.3.3 --depth 1 https://github.com/EvolvingLMMs-Lab/lmms-eval.git
 pip install -e lmms-eval/
+
+# Install FlashMLA for attention backend tests
+pip install git+https://github.com/deepseek-ai/FlashMLA.git
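
Because the FlashMLA wheel is compiled from source at install time, a cheap import probe can surface a broken build before the CI suite starts. Below is a minimal sketch, not part of this commit, assuming the package installs an importable flash_mla module that exposes flash_mla_with_kvcache (the entry point named in the upstream FlashMLA README):

# check_flashmla.py -- hypothetical helper, not part of this commit.
# Assumes `pip install git+https://github.com/deepseek-ai/FlashMLA.git`
# provides an importable `flash_mla` module exposing `flash_mla_with_kvcache`.
import importlib


def flashmla_available() -> bool:
    """Return True if FlashMLA imports and exposes its decode kernel."""
    try:
        mod = importlib.import_module("flash_mla")
    except ImportError:
        return False
    return hasattr(mod, "flash_mla_with_kvcache")


if __name__ == "__main__":
    print("FlashMLA available:", flashmla_available())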
test/srt/run_suite.py

@@ -51,6 +51,7 @@ suites = {
         TestFile("test_mla_int8_deepseek_v3.py", 389),
         TestFile("test_mla_flashinfer.py", 395),
         TestFile("test_mla_fp8.py", 153),
+        TestFile("test_flash_mla_attention_backend.py", 300),
         TestFile("test_no_chunked_prefill.py", 108),
         TestFile("test_no_overlap_scheduler.py", 216),
         TestFile("test_openai_server.py", 149),
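
The second argument to each TestFile appears to be an estimated runtime in seconds (300 for the new test), which lets a suite runner balance files across CI shards. A minimal sketch of that idea, using a stand-in namedtuple rather than sglang's actual TestFile class:

# Hypothetical sketch of runtime-balanced sharding; `TestFile` here is a
# stand-in namedtuple, not sglang's real class. The estimates are the
# per-file seconds from the suite definition above.
from collections import namedtuple

TestFile = namedtuple("TestFile", ["name", "estimated_seconds"])


def partition(files, num_shards):
    """Assign each file, longest first, to the currently lightest shard."""
    shards = [[] for _ in range(num_shards)]
    loads = [0] * num_shards
    for f in sorted(files, key=lambda t: t.estimated_seconds, reverse=True):
        i = loads.index(min(loads))
        shards[i].append(f)
        loads[i] += f.estimated_seconds
    return shards


files = [
    TestFile("test_mla_fp8.py", 153),
    TestFile("test_flash_mla_attention_backend.py", 300),
    TestFile("test_no_overlap_scheduler.py", 216),
    TestFile("test_openai_server.py", 149),
]
for i, shard in enumerate(partition(files, 2)):
    print(i, [t.name for t in shard], sum(t.estimated_seconds for t in shard), "s")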
test/srt/test_flash_mla_attention_backend.py (new file, mode 100644)

"""
Usage:
python3 -m unittest test_flash_mla_attention_backend.TestFlashMLAAttnBackend.test_mmlu
"""

import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MLA_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    is_in_ci,
    popen_launch_server,
    run_bench_one_batch,
)


class TestFlashMLAAttnBackend(unittest.TestCase):
    def test_latency(self):
        output_throughput = run_bench_one_batch(
            DEFAULT_MLA_MODEL_NAME_FOR_TEST,
            [
                "--attention-backend",
                "flashmla",
                "--enable-torch-compile",
                "--cuda-graph-max-bs",
                "16",
                "--trust-remote-code",
            ],
        )

        if is_in_ci():
            self.assertGreater(output_throughput, 153)

    def test_mmlu(self):
        model = DEFAULT_MLA_MODEL_NAME_FOR_TEST
        base_url = DEFAULT_URL_FOR_TEST
        process = popen_launch_server(
            model,
            base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--attention-backend", "flashmla", "--trust-remote-code"],
        )

        try:
            args = SimpleNamespace(
                base_url=base_url,
                model=model,
                eval_name="mmlu",
                num_examples=64,
                num_threads=32,
            )

            metrics = run_eval(args)
            self.assertGreaterEqual(metrics["score"], 0.2)
        finally:
            kill_process_tree(process.pid)


if __name__ == "__main__":
    unittest.main()
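
The docstring shows how to run a single case; the same invocation works at class granularity:

python3 -m unittest test_flash_mla_attention_backend.TestFlashMLAAttnBackend

Note that test_latency only asserts on output throughput when is_in_ci() is true, so a local run exercises the flashmla benchmark path without failing on slower hardware.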