"""
Usage:
python3 -m unittest test_srt_engine.TestSRTEngine.test_3_sync_streaming_combination
"""

import asyncio
import json
import unittest
from types import SimpleNamespace

import torch

import sglang as sgl
from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.test.few_shot_gsm8k_engine import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
)


class TestSRTEngine(unittest.TestCase):

    def test_1_engine_runtime_consistency(self):
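        # The Engine and Runtime backends should return identical outputs under greedy decoding.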
        prompt = "Today is a sunny day and I like"
        model_path = DEFAULT_SMALL_MODEL_NAME_FOR_TEST

        sampling_params = {"temperature": 0, "max_new_tokens": 8}

        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
        out1 = engine.generate(prompt, sampling_params)["text"]
        engine.shutdown()

        runtime = sgl.Runtime(model_path=model_path, random_seed=42)
        out2 = json.loads(runtime.generate(prompt, sampling_params))["text"]
        runtime.shutdown()

        print("==== Answer 1 ====")
        print(out1)

        print("==== Answer 2 ====")
        print(out2)
        assert out1 == out2, f"{out1} != {out2}"

    def test_2_engine_multiple_generate(self):
        # Ensure there is no issue when calling generate multiple times on the same engine.
        prompt = "Today is a sunny day and I like"
        model_path = DEFAULT_SMALL_MODEL_NAME_FOR_TEST

        sampling_params = {"temperature": 0, "max_new_tokens": 8}

        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
        engine.generate(prompt, sampling_params)
        engine.generate(prompt, sampling_params)
        engine.shutdown()

    def test_3_sync_streaming_combination(self):
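        # Exercise all four combinations of sync/async and streaming/non-streaming generation.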
        prompt = "AI safety is..."
        sampling_params = {"temperature": 0.8, "top_p": 0.95}

        async def async_streaming(engine):
            generator = await engine.async_generate(
                prompt, sampling_params, stream=True
            )

            async for output in generator:
                print(output["text"], end="", flush=True)
            print()

        # Create an LLM.
        llm = sgl.Engine(
            model_path=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
            log_level="error",
        )

        # 1. sync + non streaming
        print("\n\n==== 1. sync + non streaming ====")
        output = llm.generate(prompt, sampling_params)

        print(output["text"])

        # 2. sync + streaming
        print("\n\n==== 2. sync + streaming ====")
        output_generator = llm.generate(prompt, sampling_params, stream=True)
        for output in output_generator:
            print(output["text"], end="", flush=True)
        print()

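        # Drive the async API from this synchronous test via an explicit event loop.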
        loop = asyncio.get_event_loop()
        # 3. async + non streaming
        print("\n\n==== 3. async + non streaming ====")
        output = loop.run_until_complete(llm.async_generate(prompt, sampling_params))
        print(output["text"])

        # 4. async + streaming
        print("\n\n==== 4. async + streaming ====")
        loop.run_until_complete(async_streaming(llm))

        llm.shutdown()

    def test_4_gsm8k(self):
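        # Run a few-shot GSM8K evaluation and require accuracy above a minimum threshold.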
        args = SimpleNamespace(
            model_path=DEFAULT_MODEL_NAME_FOR_TEST,
            local_data_path=None,
            num_shots=5,
            num_questions=200,
        )

        metrics = run_eval(args)
        assert metrics["accuracy"] > 0.7

    def test_5_prompt_input_ids_consistency(self):
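        # A text prompt and its pre-tokenized input_ids should yield the same greedy output.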
        prompt = "The capital of UK is"

        model_path = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
        sampling_params = {"temperature": 0, "max_new_tokens": 8}
        out1 = engine.generate(prompt, sampling_params)["text"]

        tokenizer = get_tokenizer(model_path)
        token_ids = tokenizer.encode(prompt)
        out2 = engine.generate(input_ids=token_ids, sampling_params=sampling_params)[
            "text"
        ]

        engine.shutdown()

        print("==== Answer 1 ====")
        print(out1)

        print("==== Answer 2 ====")
        print(out2)
        assert out1 == out2, f"{out1} != {out2}"

    def test_6_engine_runtime_encode_consistency(self):
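        # Engine.encode and Runtime.encode should produce numerically close embeddings for the same prompt.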
        prompt = "Today is a sunny day and I like"
        model_path = DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST

        engine = sgl.Engine(
            model_path=model_path, is_embedding=True, random_seed=42, log_level="error"
        )
        out1 = torch.tensor(engine.encode(prompt)["embedding"])
        engine.shutdown()

        runtime = sgl.Runtime(model_path=model_path, is_embedding=True, random_seed=42)
        out2 = torch.tensor(json.loads(runtime.encode(prompt))["embedding"])
        runtime.shutdown()

        self.assertTrue(torch.allclose(out1, out2, atol=1e-5, rtol=1e-3))


if __name__ == "__main__":
    unittest.main()