sglang · Commits

Commit 48473684 (unverified)
Split test_mla.py into two files (#4216)

Authored Mar 08, 2025 by Lianmin Zheng, committed via GitHub on Mar 08, 2025.
Parent: b3251e9f

Showing 4 changed files with 116 additions and 102 deletions:

- .github/workflows/pr-test-amd.yml (+2, -2)
- test/srt/run_suite.py (+1, -0)
- test/srt/test_mla.py (+0, -100)
- test/srt/test_mla_deepseek_v3.py (+113, -0)
.github/workflows/pr-test-amd.yml

```diff
@@ -90,11 +90,11 @@ jobs:
       - name: MLA TEST
         timeout-minutes: 20
         run: |
-          docker exec -w /sglang-checkout/test/srt ci_sglang python3 test_mla.py TestMLA
+          docker exec -w /sglang-checkout/test/srt ci_sglang python3 test_mla.py

   finish:
     needs: [
-      accuracy-test-1-gpu-amd
+      accuracy-test-1-gpu-amd, mla-test-1-gpu-amd
     ]
     runs-on: ubuntu-latest
     steps:
```
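In the MLA TEST step, a class name appended to the script (`python3 test_mla.py TestMLA`) is the standard `unittest` command-line filter: `unittest.main()` treats positional arguments as test names and runs only those. With the DeepSeek V3 classes moved to their own file, plain `python3 test_mla.py` selects the same single `TestMLA` class. A minimal illustration of that filtering behavior (hypothetical file and class names, not part of this commit):

```python
# example_filter.py - hypothetical module, not part of this commit.
# `python3 example_filter.py TestKept` runs only TestKept;
# `python3 example_filter.py` runs every TestCase defined in the module.
import unittest


class TestKept(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


class TestAlsoRunByDefault(unittest.TestCase):
    def test_sum(self):
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main()  # positional command-line arguments select which tests to run
```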
test/srt/run_suite.py

```diff
@@ -24,6 +24,7 @@ suites = {
         "test_gguf.py",
         "test_input_embeddings.py",
         "test_mla.py",
+        "test_mla_deepseek_v3.py",
         "test_mla_flashinfer.py",
         "test_mla_fp8.py",
         "test_json_constrained.py",
```
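Registering `test_mla_deepseek_v3.py` in the suite list is what lets the CI runner pick the new file up. The sketch below shows the general pattern such a registry implies, with each test file run in its own process; it is a simplified stand-in rather than the actual `run_suite.py` logic, and the suite name `per-commit` is assumed for illustration since the dict key is outside this hunk:

```python
# Hedged sketch of a file-per-process suite runner; the real run_suite.py differs.
import subprocess
import sys

suites = {
    "per-commit": [  # suite name assumed for illustration; not shown in this hunk
        "test_mla.py",
        "test_mla_deepseek_v3.py",  # newly registered by this commit
    ],
}


def run_suite(name: str) -> int:
    """Run every file in the suite sequentially and return the number of failures."""
    failures = 0
    for test_file in suites[name]:
        result = subprocess.run([sys.executable, test_file])
        if result.returncode != 0:
            failures += 1
    return failures


if __name__ == "__main__":
    sys.exit(run_suite("per-commit"))
```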
test/srt/test_mla.py

```diff
 import unittest
 from types import SimpleNamespace
 
-import requests
-import torch
-
 from sglang.srt.utils import kill_process_tree
-from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
 from sglang.test.run_eval import run_eval
 from sglang.test.test_utils import (
     DEFAULT_MLA_MODEL_NAME_FOR_TEST,
...
@@ -56,101 +52,5 @@ class TestMLA(unittest.TestCase):
         self.assertGreater(metrics["score"], 0.8)
 
 
-class TestDeepseekV3(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.model = "lmsys/sglang-ci-dsv3-test"
-        cls.base_url = DEFAULT_URL_FOR_TEST
-        other_args = ["--trust-remote-code"]
-        if torch.cuda.is_available() and torch.version.cuda:
-            other_args.extend(["--enable-torch-compile", "--cuda-graph-max-bs", "2"])
-        cls.process = popen_launch_server(
-            cls.model,
-            cls.base_url,
-            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-            other_args=other_args,
-        )
-
-    @classmethod
-    def tearDownClass(cls):
-        kill_process_tree(cls.process.pid)
-
-    def test_gsm8k(self):
-        args = SimpleNamespace(
-            num_shots=5,
-            data_path=None,
-            num_questions=200,
-            max_new_tokens=512,
-            parallel=128,
-            host="http://127.0.0.1",
-            port=int(self.base_url.split(":")[-1]),
-        )
-
-        metrics = run_eval_few_shot_gsm8k(args)
-        print(metrics)
-
-        self.assertGreater(metrics["accuracy"], 0.62)
-
-
-class TestDeepseekV3MTP(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.model = "lmsys/sglang-ci-dsv3-test"
-        cls.base_url = DEFAULT_URL_FOR_TEST
-        other_args = ["--trust-remote-code"]
-        if torch.cuda.is_available() and torch.version.cuda:
-            other_args.extend(
-                [
-                    "--cuda-graph-max-bs",
-                    "2",
-                    "--disable-radix",
-                    "--enable-torch-compile",
-                    "--torch-compile-max-bs",
-                    "1",
-                    "--speculative-algorithm",
-                    "EAGLE",
-                    "--speculative-draft",
-                    "lmsys/sglang-ci-dsv3-test-NextN",
-                    "--speculative-num-steps",
-                    "2",
-                    "--speculative-eagle-topk",
-                    "4",
-                    "--speculative-num-draft-tokens",
-                    "4",
-                ]
-            )
-        cls.process = popen_launch_server(
-            cls.model,
-            cls.base_url,
-            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-            other_args=other_args,
-        )
-
-    @classmethod
-    def tearDownClass(cls):
-        kill_process_tree(cls.process.pid)
-
-    def test_gsm8k(self):
-        requests.get(self.base_url + "/flush_cache")
-
-        args = SimpleNamespace(
-            num_shots=5,
-            data_path=None,
-            num_questions=200,
-            max_new_tokens=512,
-            parallel=128,
-            host="http://127.0.0.1",
-            port=int(self.base_url.split(":")[-1]),
-        )
-
-        metrics = run_eval_few_shot_gsm8k(args)
-        print(metrics)
-        self.assertGreater(metrics["accuracy"], 0.60)
-
-        server_info = requests.get(self.base_url + "/get_server_info")
-        avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
-        print(f"{avg_spec_accept_length=}")
-        self.assertGreater(avg_spec_accept_length, 2.5)
-
 
 if __name__ == "__main__":
     unittest.main()
```
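Both removed classes gate their extra server flags behind `if torch.cuda.is_available() and torch.version.cuda:`. On ROCm builds of PyTorch, `torch.cuda.is_available()` is still true but `torch.version.cuda` is `None` (only `torch.version.hip` is set), so the torch.compile and CUDA-graph arguments are skipped there and the server launches with `--trust-remote-code` alone. A standalone sketch of that backend check:

```python
# Standalone sketch of the CUDA-only flag gating used in setUpClass.
import torch

other_args = ["--trust-remote-code"]
if torch.cuda.is_available() and torch.version.cuda:
    # NVIDIA CUDA build: add the torch.compile / CUDA-graph flags.
    other_args.extend(["--enable-torch-compile", "--cuda-graph-max-bs", "2"])
else:
    # ROCm (torch.version.cuda is None) or CPU-only build: keep the base flags only.
    pass

print(other_args)
```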
test/srt/test_mla_deepseek_v3.py (new file, mode 0 → 100644)

```python
import unittest
from types import SimpleNamespace

import requests
import torch

from sglang.srt.utils import kill_process_tree
from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
from sglang.test.test_utils import (
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestDeepseekV3(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = "lmsys/sglang-ci-dsv3-test"
        cls.base_url = DEFAULT_URL_FOR_TEST
        other_args = ["--trust-remote-code"]
        if torch.cuda.is_available() and torch.version.cuda:
            other_args.extend(["--enable-torch-compile", "--cuda-graph-max-bs", "2"])
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=other_args,
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_gsm8k(self):
        args = SimpleNamespace(
            num_shots=5,
            data_path=None,
            num_questions=200,
            max_new_tokens=512,
            parallel=128,
            host="http://127.0.0.1",
            port=int(self.base_url.split(":")[-1]),
        )

        metrics = run_eval_few_shot_gsm8k(args)
        print(metrics)

        self.assertGreater(metrics["accuracy"], 0.62)


class TestDeepseekV3MTP(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = "lmsys/sglang-ci-dsv3-test"
        cls.base_url = DEFAULT_URL_FOR_TEST
        other_args = ["--trust-remote-code"]
        if torch.cuda.is_available() and torch.version.cuda:
            other_args.extend(
                [
                    "--cuda-graph-max-bs",
                    "2",
                    "--disable-radix",
                    "--enable-torch-compile",
                    "--torch-compile-max-bs",
                    "1",
                    "--speculative-algorithm",
                    "EAGLE",
                    "--speculative-draft",
                    "lmsys/sglang-ci-dsv3-test-NextN",
                    "--speculative-num-steps",
                    "2",
                    "--speculative-eagle-topk",
                    "4",
                    "--speculative-num-draft-tokens",
                    "4",
                ]
            )
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=other_args,
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_gsm8k(self):
        requests.get(self.base_url + "/flush_cache")

        args = SimpleNamespace(
            num_shots=5,
            data_path=None,
            num_questions=200,
            max_new_tokens=512,
            parallel=128,
            host="http://127.0.0.1",
            port=int(self.base_url.split(":")[-1]),
        )

        metrics = run_eval_few_shot_gsm8k(args)
        print(metrics)
        self.assertGreater(metrics["accuracy"], 0.60)

        server_info = requests.get(self.base_url + "/get_server_info")
        avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
        print(f"{avg_spec_accept_length=}")
        self.assertGreater(avg_spec_accept_length, 2.5)


if __name__ == "__main__":
    unittest.main()
```
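The MTP test's acceptance check reads `avg_spec_accept_length` from the server's `/get_server_info` endpoint after the GSM8K run. The same check can be made by hand against a running server; the snippet below assumes a server is already listening at `BASE_URL` (placeholder port, not taken from this commit):

```python
# Standalone check of the speculative-decoding accept length reported by the server.
# Assumes an sglang server is already running at BASE_URL (placeholder port below).
import requests

BASE_URL = "http://127.0.0.1:30000"

server_info = requests.get(BASE_URL + "/get_server_info").json()
avg_spec_accept_length = server_info["avg_spec_accept_length"]
print(f"{avg_spec_accept_length=}")
assert avg_spec_accept_length > 2.5, "accept length below the test's 2.5 threshold"
```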