Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
change
sglang
Commits
a4d6d6f1
Unverified
Commit
a4d6d6f1
authored
Jan 01, 2025
by
Xiaotong Jiang
Committed by
GitHub
Jan 01, 2025
Browse files
[feat]: Add math eval to CI nightly run (#2663)
Co-authored-by:
Chayenne
<
zhaochen20@outlook.com
>
parent
062c48d2
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
64 additions
and
8 deletions
+64
-8
test/srt/run_suite.py
test/srt/run_suite.py
+1
-0
test/srt/test_nightly_gsm8k_eval.py
test/srt/test_nightly_gsm8k_eval.py
+2
-2
test/srt/test_nightly_human_eval.py
test/srt/test_nightly_human_eval.py
+15
-6
test/srt/test_nightly_math_eval.py
test/srt/test_nightly_math_eval.py
+46
-0
No files found.
test/srt/run_suite.py
View file @
a4d6d6f1
...
@@ -49,6 +49,7 @@ suites = {
...
@@ -49,6 +49,7 @@ suites = {
"nightly"
:
[
"nightly"
:
[
"test_nightly_gsm8k_eval.py"
,
"test_nightly_gsm8k_eval.py"
,
"test_nightly_human_eval.py"
,
"test_nightly_human_eval.py"
,
"test_nightly_math_eval.py"
,
],
],
"sampling/penaltylib"
:
glob
.
glob
(
"sampling/penaltylib"
:
glob
.
glob
(
"sampling/penaltylib/**/test_*.py"
,
recursive
=
True
"sampling/penaltylib/**/test_*.py"
,
recursive
=
True
...
...
test/srt/test_nightly_gsm8k_eval.py
View file @
a4d6d6f1
...
@@ -25,7 +25,7 @@ MODEL_SCORE_THRESHOLDS = {
...
@@ -25,7 +25,7 @@ MODEL_SCORE_THRESHOLDS = {
"deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
:
0.84
,
"deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
:
0.84
,
"google/gemma-2-27b-it"
:
0.92
,
"google/gemma-2-27b-it"
:
0.92
,
"meta-llama/Llama-3.1-70B-Instruct"
:
0.96
,
"meta-llama/Llama-3.1-70B-Instruct"
:
0.96
,
"mistralai/Mixtral-8x7B-Instruct-v0.1"
:
0.6
4
,
"mistralai/Mixtral-8x7B-Instruct-v0.1"
:
0.6
3
,
"Qwen/Qwen2-57B-A14B-Instruct"
:
0.87
,
"Qwen/Qwen2-57B-A14B-Instruct"
:
0.87
,
"neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
:
0.84
,
"neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
:
0.84
,
"neuralmagic/Mistral-7B-Instruct-v0.3-FP8"
:
0.54
,
"neuralmagic/Mistral-7B-Instruct-v0.3-FP8"
:
0.54
,
...
@@ -36,7 +36,7 @@ MODEL_SCORE_THRESHOLDS = {
...
@@ -36,7 +36,7 @@ MODEL_SCORE_THRESHOLDS = {
"neuralmagic/Qwen2-72B-Instruct-FP8"
:
0.95
,
"neuralmagic/Qwen2-72B-Instruct-FP8"
:
0.95
,
"neuralmagic/Qwen2-57B-A14B-Instruct-FP8"
:
0.82
,
"neuralmagic/Qwen2-57B-A14B-Instruct-FP8"
:
0.82
,
"hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4"
:
0.84
,
"hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4"
:
0.84
,
"hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4"
:
0.8
4
,
"hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4"
:
0.8
3
,
}
}
...
...
test/srt/test_nightly_human_eval.py
View file @
a4d6d6f1
...
@@ -12,19 +12,28 @@ from sglang.test.test_utils import (
...
@@ -12,19 +12,28 @@ from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2
,
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2
,
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
,
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
,
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
,
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
,
DEFAULT_MODEL_NAME_FOR_TEST
,
DEFAULT_URL_FOR_TEST
,
DEFAULT_URL_FOR_TEST
,
is_in_ci
,
)
)
class
TestEvalAccuracyLarge
(
unittest
.
TestCase
):
class
TestEvalAccuracyLarge
(
unittest
.
TestCase
):
@
classmethod
@
classmethod
def
setUpClass
(
cls
):
def
setUpClass
(
cls
):
cls
.
model_groups
=
[
if
is_in_ci
():
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
),
False
,
False
),
cls
.
model_groups
=
[([
DEFAULT_MODEL_NAME_FOR_TEST
],
False
,
False
)]
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
),
False
,
True
),
else
:
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1
),
True
,
False
),
cls
.
model_groups
=
[
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2
),
True
,
True
),
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
),
False
,
False
),
]
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
),
False
,
True
),
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1
),
True
,
False
,
),
(
parse_models
(
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2
),
True
,
True
),
]
cls
.
base_url
=
DEFAULT_URL_FOR_TEST
cls
.
base_url
=
DEFAULT_URL_FOR_TEST
cls
.
process
=
None
cls
.
process
=
None
cls
.
eval_process
=
None
cls
.
eval_process
=
None
...
...
test/srt/test_nightly_math_eval.py
0 → 100644
View file @
a4d6d6f1
import
unittest
from
types
import
SimpleNamespace
from
sglang.srt.utils
import
kill_process_tree
from
sglang.test.run_eval
import
run_eval
from
sglang.test.test_utils
import
(
DEFAULT_MODEL_NAME_FOR_TEST
,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH
,
DEFAULT_URL_FOR_TEST
,
popen_launch_server
,
)
class TestEvalAccuracyLarge(unittest.TestCase):
    """Nightly accuracy check: run the MATH eval against a locally launched server.

    One server process is shared across the whole class (launched in
    ``setUpClass``, torn down in ``tearDownClass``).
    """

    @classmethod
    def setUpClass(cls):
        # Launch a single shared server for every test in this class.
        # HTTP access logs are silenced so nightly CI output stays readable.
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--log-level-http", "warning"],
        )

    @classmethod
    def tearDownClass(cls):
        # Terminate the server together with any child workers it spawned.
        kill_process_tree(cls.process.pid)

    def test_math(self):
        """Run the full MATH eval (5000 examples) and gate on the score."""
        eval_args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="math",
            num_examples=5000,
            num_threads=1024,
        )
        result = run_eval(eval_args)
        # Reference score is 0.519; subtract 2% to account for sampling variance.
        self.assertGreaterEqual(result["score"], 0.519 - 0.02)
# Allow running this test file directly (e.g. `python test_nightly_math_eval.py`)
# in addition to being collected by the nightly suite runner.
if __name__ == "__main__":
    unittest.main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment