"""
python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_parallel_sample
python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_simple_decode_stream
"""

import json
import unittest
from io import BytesIO

import requests
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer

from sglang.lang.chat_template import get_chat_template_by_model_path
from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
    DEFAULT_IMAGE_URL,
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    popen_launch_server,
)


class TestSkipTokenizerInit(CustomTestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--skip-tokenizer-init", "--stream-output"],
        )
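        # Extra stop-token id forwarded to the server via sampling_params.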
        cls.eos_token_id = [119690]
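        # With --skip-tokenizer-init the server exchanges raw token ids, so
        # this test tokenizes prompts and decodes outputs on the client side.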
        cls.tokenizer = AutoTokenizer.from_pretrained(
            DEFAULT_SMALL_MODEL_NAME_FOR_TEST, use_fast=False
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def run_decode(
        self,
        prompt_text="The capital of France is",
        max_new_tokens=32,
        return_logprob=False,
        top_logprobs_num=0,
        n=1,
    ):
        input_ids = self.get_input_ids(prompt_text)

        request = self.get_request_json(
            input_ids=input_ids,
            return_logprob=return_logprob,
            top_logprobs_num=top_logprobs_num,
            max_new_tokens=max_new_tokens,
            stream=False,
            n=n,
        )
        response = requests.post(
            self.base_url + "/generate",
            json=request,
        )
        ret = response.json()
        print(json.dumps(ret, indent=2))

        def assert_one_item(item):
            if item["meta_info"]["finish_reason"]["type"] == "stop":
                self.assertEqual(
                    item["meta_info"]["finish_reason"]["matched"],
                    self.tokenizer.eos_token_id,
                )
            elif item["meta_info"]["finish_reason"]["type"] == "length":
                self.assertEqual(
                    len(item["output_ids"]), item["meta_info"]["completion_tokens"]
                )
                self.assertEqual(len(item["output_ids"]), max_new_tokens)
                self.assertEqual(item["meta_info"]["prompt_tokens"], len(input_ids))

                if return_logprob:
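                    # With logprob_start_len == 0 the server returns one
                    # logprob per prompt token; the VLM subclass sends -1,
                    # which the wrap-around below reduces to expecting a
                    # single input logprob.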
                    num_input_logprobs = len(input_ids) - request["logprob_start_len"]
                    if num_input_logprobs > len(input_ids):
                        num_input_logprobs -= len(input_ids)
                    self.assertEqual(
                        len(item["meta_info"]["input_token_logprobs"]),
                        num_input_logprobs,
                        f'{len(item["meta_info"]["input_token_logprobs"])} mismatch with {num_input_logprobs}',
                    )
                    self.assertEqual(
                        len(item["meta_info"]["output_token_logprobs"]),
                        max_new_tokens,
                    )

        # Determine whether to assert a single item or multiple items based on n
        if n == 1:
            assert_one_item(ret)
        else:
            self.assertEqual(len(ret), n)
            for i in range(n):
                assert_one_item(ret[i])

        print("=" * 100)

    def run_decode_stream(self, return_logprob=False, top_logprobs_num=0, n=1):
        max_new_tokens = 32
        input_ids = self.get_input_ids("The capital of France is")
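        # Flush the server cache so both the non-streaming and streaming runs
        # start from the same state.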
        requests.post(self.base_url + "/flush_cache")
        response = requests.post(
            self.base_url + "/generate",
            json=self.get_request_json(
                input_ids=input_ids,
                max_new_tokens=max_new_tokens,
                return_logprob=return_logprob,
                top_logprobs_num=top_logprobs_num,
                stream=False,
                n=n,
            ),
        )
        ret = response.json()
        print(json.dumps(ret))
        output_ids = ret["output_ids"]
        print("output from non-streaming request:")
        print(output_ids)
        print(self.tokenizer.decode(output_ids, skip_special_tokens=True))

        requests.post(self.base_url + "/flush_cache")
        response_stream = requests.post(
            self.base_url + "/generate",
            json=self.get_request_json(
                input_ids=input_ids,
                max_new_tokens=max_new_tokens,
                return_logprob=return_logprob,
                top_logprobs_num=top_logprobs_num,
                stream=True,
                n=n,
            ),
        )

        response_stream_json = []
        for line in response_stream.iter_lines():
            print(line)
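            # Each SSE line looks like b"data: {json}"; the stream ends with
            # b"data: [DONE]".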
            if line.startswith(b"data: ") and line[6:] != b"[DONE]":
                response_stream_json.append(json.loads(line[6:]))
        out_stream_ids = []
        for x in response_stream_json:
            out_stream_ids += x["output_ids"]
        print("output from streaming request:")
        print(out_stream_ids)
        print(self.tokenizer.decode(out_stream_ids, skip_special_tokens=True))

        self.assertEqual(output_ids, out_stream_ids)

    def test_simple_decode(self):
        self.run_decode()

    def test_parallel_sample(self):
        self.run_decode(n=3)

    def test_logprob(self):
        for top_logprobs_num in [0, 3]:
            self.run_decode(return_logprob=True, top_logprobs_num=top_logprobs_num)

    def test_eos_behavior(self):
        self.run_decode(max_new_tokens=256)

    def test_simple_decode_stream(self):
        self.run_decode_stream()

    def get_input_ids(self, prompt_text) -> list[int]:
        input_ids = self.tokenizer(prompt_text, return_tensors="pt")["input_ids"][
            0
        ].tolist()
        return input_ids

    def get_request_json(
        self,
        input_ids,
        max_new_tokens=32,
        return_logprob=False,
        top_logprobs_num=0,
        stream=False,
        n=1,
    ):
        return {
            "input_ids": input_ids,
            "sampling_params": {
                "temperature": 0 if n == 1 else 0.5,
                "max_new_tokens": max_new_tokens,
                "n": n,
                "stop_token_ids": self.eos_token_id,
            },
            "stream": stream,
            "return_logprob": return_logprob,
            "top_logprobs_num": top_logprobs_num,
            "logprob_start_len": 0,
        }


class TestSkipTokenizerInitVLM(TestSkipTokenizerInit):
    @classmethod
    def setUpClass(cls):
        cls.image_url = DEFAULT_IMAGE_URL
        response = requests.get(cls.image_url)
        cls.image = Image.open(BytesIO(response.content))
        cls.model = DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model, use_fast=False)
        cls.processor = AutoProcessor.from_pretrained(
            cls.model, trust_remote_code=True
        )
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--skip-tokenizer-init"],
        )
        cls.eos_token_id = [cls.tokenizer.eos_token_id]

    def get_input_ids(self, _prompt_text) -> list[int]:
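        # The prompt argument is ignored; build multimodal token ids that
        # embed the model's image placeholder token via the processor.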
        chat_template = get_chat_template_by_model_path(self.model)
        text = f"{chat_template.image_token}What is in this picture?"
        inputs = self.processor(
            text=[text],
            images=[self.image],
            return_tensors="pt",
        )

        return inputs.input_ids[0].tolist()

    def get_request_json(self, *args, **kwargs):
        ret = super().get_request_json(*args, **kwargs)
        ret["image_data"] = [self.image_url]
        ret["logprob_start_len"] = (
            -1
        )  # Do not try to calculate logprobs of image embeddings.
        return ret

    def test_simple_decode_stream(self):
        # TODO(mick): enable the streaming test for the VLM path.
        pass


if __name__ == "__main__":
    unittest.main()