sglang · Commits · 408ba022

Add Llama 4 to FA3 test (#5509)

Unverified commit 408ba022, authored Apr 26, 2025 by Stefan He, committed via GitHub on Apr 26, 2025. Parent: 094891c0.

Showing 4 changed files with 178 additions and 104 deletions.
.github/workflows/pr-test.yml      +20   -0
python/sglang/test/test_utils.py    +6   -0
test/srt/run_suite.py               +3   -1
test/srt/test_fa3.py              +149   -103
.github/workflows/pr-test.yml (+20 -0)

A new unit-test-backend-8-gpu job is added to the PR test workflow, right after the 2-GPU suite (hunk @@ -87,6 +87,26 @@ jobs:). It uses the same gating as the neighboring jobs, running only for the upstream sgl-project/sglang repository or for non-draft pull requests:

```yaml
  unit-test-backend-8-gpu:
    if: (github.repository == 'sgl-project/sglang' || github.event_name == 'pull_request') &&
      github.event.pull_request.draft == false
    runs-on: 8-gpu-runner
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install dependencies
        env:
          FLASHINFER_REPO: ${{ inputs.version == 'nightly' && 'https://flashinfer.ai/whl/nightly/cu124/torch2.5/flashinfer-python' || 'https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python' }}
        run: |
          bash scripts/ci_install_dependency.sh

      - name: Run test
        timeout-minutes: 30
        run: |
          cd test/srt
          python3 run_suite.py --suite per-commit-8-gpu
```

The surrounding context, the per-commit-2-gpu suite above it and performance-test-1-gpu-part-1 below it, is unchanged.
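To reproduce the CI job's steps on a local 8-GPU machine, the same two commands can be driven from Python; this is a minimal sketch, not part of the commit, and it assumes the sglang repository is checked out in the working directory:

```python
# Sketch: mirror the CI job's "Install dependencies" and "Run test" steps locally.
import subprocess

subprocess.run(
    ["bash", "scripts/ci_install_dependency.sh"],  # same dependency setup as CI
    check=True,
)
subprocess.run(
    ["python3", "run_suite.py", "--suite", "per-commit-8-gpu"],
    cwd="test/srt",  # the workflow cd's into test/srt before running the suite
    check=True,
)
```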
python/sglang/test/test_utils.py (+6 -0)

Four model-name constants (six added lines) join the existing defaults (hunk @@ -44,7 +44,13 @@), so tests can reference the EAGLE3 draft model, the DeepSeek-V3 MLA CI checkpoints, and Llama 4 Scout by name instead of hard-coding strings:

```python
DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3 = "jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B"  # added
DEFAULT_MODEL_NAME_FOR_TEST_MLA = "lmsys/sglang-ci-dsv3-test"  # added
DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = "lmsys/sglang-ci-dsv3-test-NextN"  # added
DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION = (  # added
    "meta-llama/Llama-4-Scout-17B-16E-Instruct"
)
DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST = "Qwen/Qwen1.5-MoE-A2.7B"
DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST = "Alibaba-NLP/gte-Qwen2-1.5B-instruct"
```
test/srt/run_suite.py (+3 -1)

test_fa3.py moves out of the default per-commit suite into a new per-commit-8-gpu suite, which is what the new CI job runs. In the per-commit list (hunk @@ -30,7 +30,6 @@ suites =) the entry is dropped:

```diff
         TestFile("test_chunked_prefill.py", 336),
         TestFile("test_eagle_infer.py", 500),
         TestFile("test_ebnf_constrained.py"),
-        TestFile("test_fa3.py", 200),
         TestFile("test_fp8_kernel.py", 8),
         TestFile("test_embedding_openai_server.py", 36),
         TestFile("test_hidden_states.py", 55),
```

and the new suite is registered just before the nightly suite (hunk @@ -91,6 +90,9 @@):

```diff
         TestFile("test_update_weights_from_distributed.py", 100),
         TestFile("test_verl_engine.py", 100),
     ],
+    "per-commit-8-gpu": [
+        TestFile("test_fa3.py", 30),
+    ],
     "nightly": [
         TestFile("test_nightly_gsm8k_eval.py"),
     ],
```

The numeric field attached to each TestFile appears to be an estimated runtime; it drops from 200 to 30 along with the lighter GSM8K settings introduced in test_fa3.py below.
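For orientation, suite registration in run_suite.py follows a pattern roughly like the sketch below. This is an illustration, not the file's actual code; the field names (name, estimated_time) are assumptions:

```python
# Minimal sketch of a run_suite.py-style suite registry and --suite selection.
import argparse
from dataclasses import dataclass


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60  # assumed: rough runtime estimate in seconds


suites = {
    "per-commit-8-gpu": [
        TestFile("test_fa3.py", 30),
    ],
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--suite", required=True, choices=suites.keys())
    args = parser.parse_args()
    for f in suites[args.suite]:
        print(f"would run {f.name} (est. {f.estimated_time}s)")
```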
test/srt/test_fa3.py (+149 -103)

The FA3 integration test is restructured: hard-coded model strings give way to the new test_utils constants, an offline mode is added for machines without internet access, server arguments are centralized in a shared DEFAULT_SERVER_ARGS list, and the near-identical per-class test_gsm8k overrides for speculative decoding are folded into the base class behind a speculative_decode flag. The headline addition is a Llama 4 local-attention test, plus a new MLA speculative-decoding test with top-k > 1.

The import block gains os and the new constants (hunk @@ -8,47 +9,83 @@ from sglang.srt.utils import get_device_sm, kill_process_tree):

```diff
+import os
 import unittest
 from types import SimpleNamespace
 ...
 from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
 from sglang.test.test_utils import (
     DEFAULT_MODEL_NAME_FOR_TEST,
+    DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3,
+    DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION,
+    DEFAULT_MODEL_NAME_FOR_TEST_MLA,
+    DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
     popen_launch_server,
 )
```
New module-level configuration follows (all added lines):

```python
GSM_DATASET_PATH = None

# In case of some machine lack internet connection, we can set OFFLINE_MODE to True.
OFFLINE_MODE = False

# Change the path below when OFFLINE_MODE is True.
OFFLINE_PATH_DICT = {
    DEFAULT_MODEL_NAME_FOR_TEST: "/shared/public/elr-models/meta-llama/Meta-Llama-3.1-8B-Instruct",
    DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3: "/shared/public/elr-models/jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B",
    DEFAULT_MODEL_NAME_FOR_TEST_MLA: "/shared/public/sharing/deepseek/dsv3-test/snapshots/",
    DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN: "/shared/public/sharing/deepseek/dsv3-test-NextN/snapshots/",
    GSM_DATASET_PATH: "/shared/public/data/gsm8k/test.jsonl",
}

if OFFLINE_MODE:
    DEFAULT_MODEL_NAME_FOR_TEST = OFFLINE_PATH_DICT[DEFAULT_MODEL_NAME_FOR_TEST]
    DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3 = OFFLINE_PATH_DICT[DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3]
    DEFAULT_MODEL_NAME_FOR_TEST_MLA = OFFLINE_PATH_DICT[DEFAULT_MODEL_NAME_FOR_TEST_MLA]
    DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = OFFLINE_PATH_DICT[DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN]
    GSM_DATASET_PATH = OFFLINE_PATH_DICT[GSM_DATASET_PATH]

# Default server arguments shared across all tests
DEFAULT_SERVER_ARGS = [
    "--trust-remote-code",
    "--enable-torch-compile",
    "--cuda-graph-max-bs",
    "2",
    "--attention-backend",
    "fa3",
]
```
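One subtlety worth flagging: GSM_DATASET_PATH is None at the point the dict literal is built, so the dataset path is stored under the key None, and the later OFFLINE_PATH_DICT[GSM_DATASET_PATH] lookup only resolves because the variable still holds None. A minimal standalone sketch of that behavior (not part of the commit):

```python
# The dict key is evaluated at construction time, so this entry is keyed by None.
GSM_DATASET_PATH = None
OFFLINE_PATH_DICT = {GSM_DATASET_PATH: "/shared/public/data/gsm8k/test.jsonl"}

# Equivalent to OFFLINE_PATH_DICT[None] while GSM_DATASET_PATH is still None.
assert OFFLINE_PATH_DICT[GSM_DATASET_PATH] == "/shared/public/data/gsm8k/test.jsonl"
```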
"""
"""
Integration test for python/sglang/srt/layers/attention/flashattention_backend.py
Integration test for python/sglang/srt/layers/attention/flashattention_backend.py
"""
"""
# Change to your own model if testing model is not public.
MODEL_USED_FOR_TEST
=
DEFAULT_MODEL_NAME_FOR_TEST
MODEL_USED_FOR_TEST_MLA
=
"lmsys/sglang-ci-dsv3-test"
# Setting data path to None uses default data path in few_shot_gsm8k eval test.
DATA_PATH
=
None
BaseFlashAttentionTest now reads the shared defaults, grows opt-in flags for speculative-decode assertions, and disables DeepGEMM precompilation so the test server launches faster:

```diff
 @unittest.skipIf(get_device_sm() < 90, "Test requires CUDA SM 90 or higher")
 class BaseFlashAttentionTest(unittest.TestCase):
-    """Base class for FlashAttention tests to reduce code duplication."""
+    """Base class for testing FlashAttention3."""
 
-    model = MODEL_USED_FOR_TEST
+    model = DEFAULT_MODEL_NAME_FOR_TEST
     base_url = DEFAULT_URL_FOR_TEST
-    accuracy_threshold = 0.62
+    accuracy_threshold = 0.65  # derived tests need to override this
+    speculative_decode = False
+    spec_decode_threshold = 1.0  # derived spec decoding tests need to override this
 
     @classmethod
     def get_server_args(cls):
         """Return the arguments for the server launch. Override in subclasses."""
-        args = [
-            "--trust-remote-code",
-            "--enable-torch-compile",
-            "--attention-backend",
-            "fa3",
-        ]
-        return args
+        return DEFAULT_SERVER_ARGS
 
     @classmethod
     def setUpClass(cls):
+        # disable deep gemm precompile to make launch server faster
+        # please don't do this if you want to make your inference workload faster
+        os.environ["SGL_JIT_DEEPGEMM_PRECOMPILE"] = "False"
         cls.process = popen_launch_server(
             cls.model,
             cls.base_url,
             timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
             other_args=cls.get_server_args(),
+            env=os.environ,
         )
 
     @classmethod
```
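The base class is built for subclassing: a derived test only overrides class attributes and, when needed, get_server_args(). A minimal sketch of a hypothetical subclass in the context of this module (the class name and model are made up for illustration):

```python
# Hypothetical subclass illustrating the extension pattern used throughout this file.
class TestFlashAttention3MyVariant(BaseFlashAttentionTest):
    """Example: run the same GSM8K check against another model."""

    model = "my-org/my-model"  # assumption: any model sglang can serve
    accuracy_threshold = 0.50  # loosened threshold, purely for the example

    @classmethod
    def get_server_args(cls):
        args = DEFAULT_SERVER_ARGS.copy()  # copy to avoid mutating the shared list
        args.extend(["--disable-cuda-graph"])
        return args
```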
test_gsm8k shrinks its workload and reads the dataset path from the new module constant, which stays None (the eval's default path) unless OFFLINE_MODE is set (hunk @@ -57,13 +94,13 @@ class BaseFlashAttentionTest(unittest.TestCase):):

```diff
     def test_gsm8k(self):
         args = SimpleNamespace(
-            num_shots=5,
-            num_questions=200,
+            num_shots=4,
+            num_questions=100,
             max_new_tokens=512,
             parallel=128,
             host="http://127.0.0.1",
             port=int(self.base_url.split(":")[-1]),
-            data_path=DATA_PATH,
+            data_path=GSM_DATASET_PATH,
         )
         metrics = run_eval_few_shot_gsm8k(args)
         print(metrics)
```
After the accuracy assertion, the base test now also validates the average speculative accept length whenever a subclass opts in, replacing the hand-rolled overrides below (hunk @@ -72,61 +109,85 @@):

```diff
         metric_key = "accuracy"
         self.assertGreater(metrics[metric_key], self.accuracy_threshold)
 
+        if self.speculative_decode:
+            server_info = requests.get(self.base_url + "/get_server_info")
+            avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
+            print(f"{avg_spec_accept_length=}")
+            self.assertGreater(avg_spec_accept_length, self.spec_decode_threshold)
```
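The same check can be reproduced by hand against a running server. A minimal sketch, assuming an sglang server launched with speculative decoding is listening at the default local URL:

```python
# Query the accept-length statistic the test asserts on.
import requests

base_url = "http://127.0.0.1:30000"  # assumption: default local server address
server_info = requests.get(base_url + "/get_server_info").json()
print(server_info["avg_spec_accept_length"])  # the tests expect this to exceed 1.5
```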
The old TestFlashAttention3 and TestFlashAttention3DisableCudaGraph classes give way to an MLA test and the new Llama 4 local-attention test:

```diff
-class TestFlashAttention3(BaseFlashAttentionTest):
-    """Test FlashAttention3 with MLA model and CUDA graph enabled."""
+class TestFlashAttention3MLA(BaseFlashAttentionTest):
+    """Test FlashAttention3 with MLA, e.g. deepseek v3 test model"""
+
+    accuracy_threshold = 0.60
+    model = DEFAULT_MODEL_NAME_FOR_TEST_MLA
 
     @classmethod
     def get_server_args(cls):
-        args = super().get_server_args()
-        args.extend(
-            [
-                "--cuda-graph-max-bs",
-                "2",
-            ]
-        )
-        return args
+        return DEFAULT_SERVER_ARGS
 
-class TestFlashAttention3DisableCudaGraph(BaseFlashAttentionTest):
-    """Test FlashAttention3 with CUDA graph disabled."""
+class TestFlashAttention3LocalAttn(BaseFlashAttentionTest):
+    """Test FlashAttention3 with Model with local attention, e.g. Llama 4."""
+
+    accuracy_threshold = 0.70
+    model = DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION
 
     @classmethod
     def get_server_args(cls):
-        args = super().get_server_args()
-        args.extend(
-            [
-                "--disable-cuda-graph",
-            ]
-        )
-        return args
+        cloned_args = DEFAULT_SERVER_ARGS.copy()
+        # remove --enable-torch-compile from cloned_args since llama4 does not support it for now
+        cloned_args.remove("--enable-torch-compile")
+        # we cannot use scout's 10m context due to this bug: https://github.com/sgl-project/sglang/issues/5755
+        cloned_args.extend(["--tp", "4", "--context-length", "1000000"])
+        return cloned_args
```
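For orientation, the flags the Llama 4 test passes to popen_launch_server correspond to a server launch roughly like the following; this is a sketch assuming 4 visible GPUs, with unrelated defaults omitted:

```python
# Sketch: TestFlashAttention3LocalAttn's effective server configuration,
# spelled out as a launch command (torch.compile deliberately left out).
launch_cmd = [
    "python3", "-m", "sglang.launch_server",
    "--model-path", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    "--trust-remote-code",
    "--cuda-graph-max-bs", "2",
    "--attention-backend", "fa3",
    "--tp", "4",                    # tensor parallelism across 4 GPUs
    "--context-length", "1000000",  # capped below Scout's 10M context (issue #5755)
]
print(" ".join(launch_cmd))
```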
The Llama 3.1 8B EAGLE3 speculative test is rebuilt on the shared pieces: the draft model comes from DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3, the draft-token budget rises from 3 to 4, and the class opts into the base class's accept-length assertion instead of overriding test_gsm8k:

```python
class TestFlashAttention3SpeculativeDecode(BaseFlashAttentionTest):
    """Test FlashAttention3 with speculative decode enabled with Llama 3.1 8B and its eagle3 model"""

    model = DEFAULT_MODEL_NAME_FOR_TEST
    accuracy_threshold = 0.65
    speculative_decode = True
    spec_decode_threshold = 1.5

    @classmethod
    def get_server_args(cls):
        args = DEFAULT_SERVER_ARGS
        args.extend(
            [
                "--cuda-graph-max-bs",
                "2",
                "--speculative-algorithm",
                "EAGLE3",
                "--speculative-draft",
                DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3,
                "--speculative-num-steps",
                "3",
                "--speculative-eagle-topk",
                "1",
                "--speculative-num-draft-tokens",
                "4",
                "--dtype",
                "float16",
            ]
        )
        return args
```

Per the updated docstrings, the top-k > 1 variants exist to verify the other branches of the FA3 code; the Llama one keeps its heavier configuration (num-steps 5, eagle-topk 4, draft-tokens 8), as shown below.
The hand-rolled test_gsm8k override this class used to carry (flush the cache, run 200 questions, then assert the accept length manually) is deleted, since the base-class hook now covers it:

```diff
-    def test_gsm8k(self):
-        """
-        Override the test_gsm8k to further test for average speculative accept length.
-        """
-        requests.get(self.base_url + "/flush_cache")
-
-        args = SimpleNamespace(
-            num_shots=5,
-            data_path=DATA_PATH,
-            num_questions=200,
-            max_new_tokens=512,
-            parallel=128,
-            host="http://127.0.0.1",
-            port=int(self.base_url.split(":")[-1]),
-        )
-        metrics = run_eval_few_shot_gsm8k(args)
-        print(metrics)
-        self.assertGreater(metrics["accuracy"], 0.60)
-
-        server_info = requests.get(self.base_url + "/get_server_info")
-        avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
-        print(f"{avg_spec_accept_length=}")
-        self.assertGreater(avg_spec_accept_length, 1.5)
```
TestFlashAttention3SpeculativeDecodeTopk ("""Test FlashAttention3 with speculative decode enabled, topk > 1""") keeps its structure and its own test_gsm8k override, but switches to the shared constants:

```diff
 class TestFlashAttention3SpeculativeDecodeTopk(BaseFlashAttentionTest):
     """Test FlashAttention3 with speculative decode enabled, topk > 1"""
 
-    model = "meta-llama/Llama-3.1-8B-Instruct"
+    model = DEFAULT_MODEL_NAME_FOR_TEST
```

Inside its get_server_args (hunk @@ -188,7 +224,7 @@):

```diff
                 "--speculative-algorithm",
                 "EAGLE3",
                 "--speculative-draft",
-                "jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B",
+                DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3,
                 "--speculative-num-steps",
                 "5",
                 "--speculative-eagle-topk",
```

and inside its test_gsm8k override (hunk @@ -209,7 +245,7 @@):

```diff
         args = SimpleNamespace(
             num_shots=5,
-            data_path=DATA_PATH,
+            data_path=GSM_DATASET_PATH,
             num_questions=200,
             max_new_tokens=512,
             parallel=128,
```
TestFlashAttention3MLASpeculativeDecode (hunk @@ -228,13 +264,16 @@) gets a more specific docstring, the shared constants, the opt-in flags, and one more draft token:

```diff
 class TestFlashAttention3MLASpeculativeDecode(BaseFlashAttentionTest):
-    """Test FlashAttention3 with speculative decode enabled."""
+    """Test FlashAttention3 with speculative decode enabled with deepseek v3 test model and its nextN model"""
 
-    model = MODEL_USED_FOR_TEST_MLA
+    model = DEFAULT_MODEL_NAME_FOR_TEST_MLA
+    accuracy_threshold = 0.60
+    speculative_decode = True
+    spec_decode_threshold = 1.5
 
     @classmethod
     def get_server_args(cls):
-        args = super().get_server_args()
+        args = DEFAULT_SERVER_ARGS
         args.extend(
             [
                 "--cuda-graph-max-bs",
```

and in its argument list (hunk @@ -242,41 +281,48 @@):

```diff
                 "--speculative-algorithm",
                 "EAGLE",
                 "--speculative-draft",
-                "lmsys/sglang-ci-dsv3-test-NextN",
+                DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN,
                 "--speculative-num-steps",
                 "3",
                 "--speculative-eagle-topk",
                 "1",
                 "--speculative-num-draft-tokens",
-                "3",
+                "4",
             ]
         )
         return args
```
Its test_gsm8k override is likewise deleted (same shape as the one removed above), and a brand-new class closes out the file, covering MLA speculative decoding with top-k > 1:

```python
class TestFlashAttention3MLASpeculativeDecodeTopk(BaseFlashAttentionTest):
    """Test FlashAttention3 with speculative decode enabled with deepseek v3 test model and its nextN model
    This test will be using top-k value > 1 which would verify the other branches of the FA3 code
    """

    model = DEFAULT_MODEL_NAME_FOR_TEST_MLA
    accuracy_threshold = 0.60
    speculative_decode = True
    spec_decode_threshold = 1.5

    @classmethod
    def get_server_args(cls):
        args = DEFAULT_SERVER_ARGS
        args.extend(
            [
                "--cuda-graph-max-bs",
                "2",
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft",
                DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "8",
            ]
        )
        return args
```
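As a back-of-envelope reading of the spec_decode_threshold = 1.5 assertion these classes share: the average accept length is the mean number of tokens committed per target-model forward pass (one guaranteed token plus any accepted draft tokens), so a value above 1.5 means speculative decoding is cutting the number of decode passes by more than a third in the ideal case, ignoring draft-model overhead. An illustrative calculation (not from the diff):

```python
# Decode passes needed to emit 300 tokens, with and without speculation,
# assuming an average accept length of 1.5 tokens per pass.
tokens_to_generate = 300
passes_without_spec = tokens_to_generate / 1.0
passes_with_spec = tokens_to_generate / 1.5
print(passes_without_spec, passes_with_spec)  # 300.0 200.0
```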
The `if __name__ == "__main__":` entry point at the bottom of the file is unchanged.