test_nightly_text_models_gsm8k_eval.py
import json
import unittest
import warnings
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    check_evaluation_test_results,
    parse_models,
    popen_launch_server,
    write_results_to_json,
)

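# Minimum acceptable mgsm_en accuracy per model; the scores collected by the test
# are checked against these thresholds via check_evaluation_test_results.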
MODEL_SCORE_THRESHOLDS = {
    "meta-llama/Llama-3.1-8B-Instruct": 0.82,
    "mistralai/Mistral-7B-Instruct-v0.3": 0.58,
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct": 0.85,
    "google/gemma-2-27b-it": 0.91,
    "meta-llama/Llama-3.1-70B-Instruct": 0.95,
    "mistralai/Mixtral-8x7B-Instruct-v0.1": 0.616,
    "Qwen/Qwen2-57B-A14B-Instruct": 0.86,
    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.83,
    "neuralmagic/Mistral-7B-Instruct-v0.3-FP8": 0.54,
    "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8": 0.835,
    "zai-org/GLM-4.5-Air-FP8": 0.75,
    # The threshold for neuralmagic/gemma-2-2b-it-FP8 should be 0.6, but the model currently
    # has an accuracy regression. The fix is tracked at
    # https://github.com/sgl-project/sglang/issues/4324; the threshold is set to 0.50 for now
    # to keep CI green.
    "neuralmagic/gemma-2-2b-it-FP8": 0.50,
    "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8": 0.94,
    "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8": 0.65,
    "neuralmagic/Qwen2-72B-Instruct-FP8": 0.94,
    "neuralmagic/Qwen2-57B-A14B-Instruct-FP8": 0.82,
}


# Do not use `CustomTestCase` since `test_mgsm_en_all_models` should not be retried
class TestNightlyGsm8KEval(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
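        # Each group is a tuple of (model names, is_fp8, is_tp2); TP2 groups are
        # launched with `--tp 2`.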
        cls.model_groups = [
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1), False, False),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2), False, True),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1), True, False),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2), True, True),
        ]
        cls.base_url = DEFAULT_URL_FOR_TEST

    def test_mgsm_en_all_models(self):
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, message="unclosed.*socket"
        )
        is_first = True
        all_results = []
        model_count = 0
        for model_group, is_fp8, is_tp2 in self.model_groups:
            for model in model_group:
                model_count += 1
                with self.subTest(model=model):
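                    # Launch a dedicated server for this model; it is shut down with
                    # kill_process_tree once the eval completes.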
                    process = popen_launch_server(
                        model=model,
                        base_url=self.base_url,
                        other_args=["--tp", "2"] if is_tp2 else [],
                        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
                    )

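                    # Run the mgsm_en eval against the launched server; num_threads
                    # sets the request concurrency.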
                    args = SimpleNamespace(
                        base_url=self.base_url,
                        model=model,
                        eval_name="mgsm_en",
                        num_examples=None,
                        num_threads=1024,
                    )

                    metrics = run_eval(args)
                    print(
                        f"{'=' * 42}\n{model} - metrics={metrics} score={metrics['score']}\n{'=' * 42}\n"
                    )

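                    # The first model creates the results file; subsequent models append to it.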
                    write_results_to_json(model, metrics, "w" if is_first else "a")
                    is_first = False

                    # Latency is not measured here, so record 0.0 as a placeholder.
                    all_results.append((model, metrics["score"], 0.0))
                    kill_process_tree(process.pid)

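        # Surface the aggregated results in the test output; failing to read the
        # file is non-fatal.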
        try:
            with open("results.json", "r") as f:
                print("\nFinal Results from results.json:")
                print(json.dumps(json.load(f), indent=2))
        except Exception as e:
            print(f"Error reading results.json: {e}")

        # Check all scores after collecting all results
        check_evaluation_test_results(
            all_results,
            self.__class__.__name__,
            model_accuracy_thresholds=MODEL_SCORE_THRESHOLDS,
            model_count=model_count,
        )


if __name__ == "__main__":
    unittest.main()