import json
import unittest
import warnings
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    ModelLaunchSettings,
    check_evaluation_test_results,
    parse_models,
    popen_launch_server,
    write_results_to_json,
)

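# Minimum acceptable mgsm_en accuracy per model, enforced by check_evaluation_test_results below.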
MODEL_SCORE_THRESHOLDS = {
    "meta-llama/Llama-3.1-8B-Instruct": 0.82,
    "mistralai/Mistral-7B-Instruct-v0.3": 0.58,
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct": 0.85,
    "google/gemma-2-27b-it": 0.91,
    "meta-llama/Llama-3.1-70B-Instruct": 0.95,
    "mistralai/Mixtral-8x7B-Instruct-v0.1": 0.616,
    "Qwen/Qwen2-57B-A14B-Instruct": 0.86,
    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.83,
    "neuralmagic/Mistral-7B-Instruct-v0.3-FP8": 0.54,
    "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8": 0.835,
    "zai-org/GLM-4.5-Air-FP8": 0.75,
    # The threshold for neuralmagic/gemma-2-2b-it-FP8 should be 0.6, but this model currently has
    # an accuracy regression. The fix is tracked at https://github.com/sgl-project/sglang/issues/4324;
    # we set the threshold to 0.50 for now to keep CI green.
    "neuralmagic/gemma-2-2b-it-FP8": 0.50,
    "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8": 0.94,
    "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8": 0.65,
    "neuralmagic/Qwen2-72B-Instruct-FP8": 0.94,
    "neuralmagic/Qwen2-57B-A14B-Instruct-FP8": 0.82,
}


# Do not use `CustomTestCase`, since `test_mgsm_en_all_models` should not be retried.
class TestNightlyGsm8KEval(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
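        # Build the nightly model list: BF16 and FP8 models that run with tp_size=1,
        # followed by the BF16 and FP8 models that run with tp_size=2.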
        cls.models = []
        models_tp1 = parse_models(
            DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
        ) + parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1)
        for model_path in models_tp1:
            cls.models.append(ModelLaunchSettings(model_path, tp_size=1))

        models_tp2 = parse_models(
            DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
        ) + parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2)
        for model_path in models_tp2:
            cls.models.append(ModelLaunchSettings(model_path, tp_size=2))

        cls.base_url = DEFAULT_URL_FOR_TEST

    def test_mgsm_en_all_models(self):
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, message="unclosed.*socket"
        )
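        # Accumulate (model_path, score, latency) tuples for the final threshold check.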
        is_first = True
        all_results = []
        for model_setup in self.models:
            with self.subTest(model=model_setup.model_path):
                other_args = list(model_setup.extra_args)

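                # Per-model server overrides for this nightly run.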
                if model_setup.model_path == "meta-llama/Llama-3.1-70B-Instruct":
                    other_args.extend(["--mem-fraction-static", "0.9"])

                process = popen_launch_server(
                    model=model_setup.model_path,
                    other_args=other_args,
                    base_url=self.base_url,
                    timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
                )

                try:
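                    # Evaluate mgsm_en on all examples (num_examples=None) against the launched server.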
                    args = SimpleNamespace(
                        base_url=self.base_url,
                        model=model_setup.model_path,
                        eval_name="mgsm_en",
                        num_examples=None,
                        num_threads=1024,
                    )

                    metrics = run_eval(args)
                    print(
                        f"{'=' * 42}\n{model_setup.model_path} - metrics={metrics} score={metrics['score']}\n{'=' * 42}\n"
                    )

                    write_results_to_json(
                        model_setup.model_path, metrics, "w" if is_first else "a"
                    )
                    is_first = False

                    # Latency is not measured in this test, so record 0.0 as a placeholder.
                    all_results.append((model_setup.model_path, metrics["score"], 0.0))
                finally:
                    kill_process_tree(process.pid)

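        # Echo the accumulated results.json so the full nightly summary appears in the test output.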
        try:
            with open("results.json", "r") as f:
                print("\nFinal Results from results.json:")
                print(json.dumps(json.load(f), indent=2))
        except Exception as e:
            print(f"Error reading results.json: {e}")

        # Check all scores after collecting all results
        check_evaluation_test_results(
            all_results,
            self.__class__.__name__,
            model_accuracy_thresholds=MODEL_SCORE_THRESHOLDS,
            model_count=len(self.models),
        )


if __name__ == "__main__":
    unittest.main()