change / sglang · Commits · 2ac189ed

Amd test fp8 (#4261)

Unverified commit 2ac189ed, authored Mar 11, 2025 by HandH1998, committed by GitHub Mar 10, 2025
Parent: 5a6400ee
Showing 6 changed files with 84 additions and 0 deletions (+84, -0)
.github/workflows/pr-test-amd.yml                    +1   -0
python/sglang/srt/configs/model_config.py            +1   -0
python/sglang/srt/layers/quantization/fp8_utils.py   +4   -0
python/sglang/test/test_utils.py                     +4   -0
test/srt/run_suite.py                                +1   -0
test/srt/test_eval_fp8_accuracy.py                   +73  -0
.github/workflows/pr-test-amd.yml

@@ -55,6 +55,7 @@ jobs:
       timeout-minutes: 20
       run: |
         docker exec -w /sglang-checkout/test/srt ci_sglang python3 test_eval_accuracy_large.py
+        docker exec -w /sglang-checkout/test/srt ci_sglang python3 test_eval_fp8_accuracy.py
         docker exec -w /sglang-checkout/test/srt ci_sglang python3 models/test_qwen_models.py

   mla-test-1-gpu-amd:
python/sglang/srt/configs/model_config.py

@@ -237,6 +237,7 @@ class ModelConfig:
             "compressed_tensors",
             "compressed-tensors",
             "fbgemm_fp8",
+            "w8a8_fp8",
         ]
         optimized_quantization_methods = [
             "fp8",
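Registering "w8a8_fp8" here is what lets the server's --quantization w8a8_fp8 flag (used by the new dynamic-quant test below) pass config validation. A minimal sketch of how such an allow-list check typically works; the list variable, function name, and error message are hypothetical, and only the method names come from the diff:

# Hypothetical sketch of an allow-list check; only the method names are
# taken from the diff, the function itself is illustrative.
SUPPORTED_QUANTIZATION_METHODS = [
    "compressed_tensors",
    "compressed-tensors",
    "fbgemm_fp8",
    "w8a8_fp8",  # newly registered in this commit
]

def verify_quantization(method: str) -> None:
    # Reject unknown methods early, before any weights are loaded.
    if method is not None and method not in SUPPORTED_QUANTIZATION_METHODS:
        raise ValueError(
            f"Unknown quantization method: {method}. "
            f"Expected one of {SUPPORTED_QUANTIZATION_METHODS}."
        )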
python/sglang/srt/layers/quantization/fp8_utils.py

@@ -32,6 +32,10 @@ if _is_cuda:
 else:
     from sgl_kernel import fp8_scaled_mm

+# Input scaling factors are no longer optional in _scaled_mm starting
+# from pytorch 2.5. Allocating a dummy tensor to pass as input_scale
+TORCH_DEVICE_IDENTITY = torch.ones(1, dtype=torch.float32)
+
 def cutlass_fp8_supported():
     if not _is_cuda:
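For context on the added comment: from PyTorch 2.5 onward, torch._scaled_mm requires explicit scale tensors for both inputs, so code paths with no real per-tensor scale pass a 1-element identity tensor instead. A minimal sketch of how such a dummy scale is typically consumed, assuming FP8 inputs in the layouts _scaled_mm expects (row-major A, column-major B); the helper function is hypothetical, not part of this commit:

import torch

# Dummy scale from the diff: a 1-element float32 tensor standing in where
# no real input scale exists.
TORCH_DEVICE_IDENTITY = torch.ones(1, dtype=torch.float32)

def fp8_mm_identity_scale(a_fp8: torch.Tensor, b_fp8: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper, illustrative only. torch._scaled_mm (a private
    # PyTorch op) requires scale_a/scale_b from 2.5 onward; an identity
    # scale satisfies the signature without rescaling the result.
    scale = TORCH_DEVICE_IDENTITY.to(a_fp8.device)
    return torch._scaled_mm(
        a_fp8,          # row-major, e.g. torch.float8_e4m3fn
        b_fp8,          # column-major (transposed view)
        scale_a=scale,
        scale_b=scale,
        out_dtype=torch.bfloat16,
    )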
python/sglang/test/test_utils.py

@@ -28,6 +28,10 @@ from sglang.test.run_eval import run_eval
 from sglang.utils import get_exception_traceback

 DEFAULT_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/Meta-Llama-3.1-8B-FP8"
+DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST = "neuralmagic/Meta-Llama-3-8B-Instruct-FP8"
+DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
+    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
+)
 DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
 DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
 DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
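As the constant names suggest, the first new checkpoint is a pre-quantized FP8 model, while the "-dynamic" variant relies on dynamic activation quantization; the latter is exercised through the --quantization w8a8_fp8 path registered in model_config.py above.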
test/srt/run_suite.py

@@ -69,6 +69,7 @@ suites = {
         TestFile("test_vision_llm.py", 18.4),
         TestFile("test_vision_openai_server.py", 344),
         TestFile("test_w8a8_quantization.py", 46),
+        TestFile("test_eval_fp8_accuracy.py", 172),
     ],
     "nightly": [
         TestFile("test_nightly_gsm8k_eval.py"),
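The second argument to TestFile reads as an estimated runtime in seconds (172 s for the new FP8 eval), which the suite runner can use to order or budget tests. A sketch of the assumed shape of TestFile; the default value is an assumption, not taken from this diff:

import dataclasses

@dataclasses.dataclass
class TestFile:
    # Assumed structure: a test filename plus an estimated runtime in
    # seconds used for suite scheduling. The default is a guess.
    name: str
    estimated_time: float = 60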
test/srt/test_eval_fp8_accuracy.py (new file, mode 100644)

import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST,
    DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestEvalFP8Accuracy(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model, cls.base_url, timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_mmlu(self):
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
            temperature=0.1,
        )

        metrics = run_eval(args)
        self.assertGreaterEqual(metrics["score"], 0.62)


class TestEvalFP8DynamicQuantAccuracy(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--quantization", "w8a8_fp8"],
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_mmlu(self):
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
            temperature=0.1,
        )

        metrics = run_eval(args)
        self.assertGreaterEqual(metrics["score"], 0.70)


if __name__ == "__main__":
    unittest.main()
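Per the workflow change above, CI executes this file directly inside the ci_sglang container (docker exec -w /sglang-checkout/test/srt ci_sglang python3 test_eval_fp8_accuracy.py); running python3 test_eval_fp8_accuracy.py locally does the same. Each class launches a server with popen_launch_server, runs a 64-example MMLU eval, and asserts a minimum score: 0.62 for the static FP8 checkpoint and 0.70 for the dynamic w8a8_fp8 path.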