Unverified Commit 13f1357e authored by Lianmin Zheng's avatar Lianmin Zheng Committed by GitHub
Browse files

Add a unit test for data parallelism (#1489)

parent 2a99993c
......@@ -228,7 +228,7 @@ jobs:
cd human-eval
pip install -e .
- name: Evaluate Accuracy
- name: Evaluate Accuracy (TP=2)
timeout-minutes: 20
run: |
cd test/srt
......@@ -240,6 +240,12 @@ jobs:
cd test/srt
python3 test_mla.py
- name: Evaluate Data Parallelism Accuracy (TP=2)
timeout-minutes: 10
run: |
cd test/srt
python3 test_data_parallelism.py
finish:
needs: [
unit-test-frontend, unit-test-backend-part-1, unit-test-backend-part-2, unit-test-backend-part-3,
......
import unittest
from types import SimpleNamespace
from sglang.srt.utils import kill_child_process
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_TEST,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
popen_launch_server,
)
class TestDataParallelism(unittest.TestCase):
    """End-to-end accuracy test for data parallelism.

    Launches one server with two data-parallel replicas (``--dp 2``) and
    verifies MMLU accuracy does not regress below a fixed threshold.
    """

    @classmethod
    def setUpClass(cls):
        # Launch the server once for every test in this class; ``--dp 2``
        # enables two data-parallel workers behind a single endpoint.
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--dp", "2"],
        )

    @classmethod
    def tearDownClass(cls):
        # Kill the whole server process tree so no workers leak between runs.
        kill_child_process(cls.process.pid)

    def test_mmlu(self):
        """Run a 64-example MMLU eval and require a score of at least 0.65."""
        args = SimpleNamespace(
            base_url=self.base_url,
            model=self.model,
            eval_name="mmlu",
            num_examples=64,
            num_threads=32,
        )

        metrics = run_eval(args)
        # Use unittest's assertion rather than a bare ``assert`` (which is
        # stripped under ``python -O``) so failures also report the score.
        self.assertGreaterEqual(metrics["score"], 0.65)
# Allow running this test module directly (e.g. `python3 test_data_parallelism.py`).
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment