import json
import unittest

import requests

from sglang.srt.utils import kill_child_process
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestSkipTokenizerInit(unittest.TestCase):
    """End-to-end test for a server launched with ``--skip-tokenizer-init``.

    In this mode the client sends pre-tokenized ``input_ids`` and the server
    replies with raw ``token_ids`` instead of decoded text, so the assertions
    below work purely on token counts.
    """

    @classmethod
    def setUpClass(cls):
        # Launch one shared server process for every test in this class.
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--skip-tokenizer-init"],
        )

    @classmethod
    def tearDownClass(cls):
        # Tear down the server (and its children) started in setUpClass.
        kill_child_process(cls.process.pid, include_self=True)

    def run_decode(self, return_logprob=False, top_logprobs_num=0, n=1):
        """Send one /generate request with raw input_ids and validate the reply.

        Args:
            return_logprob: If True, ask the server for per-token logprobs and
                check their lengths against the prompt/output sizes.
            top_logprobs_num: Number of top logprobs requested per position.
            n: Number of parallel samples; ``n > 1`` switches to a nonzero
                temperature and expects a list of results.
        """
        max_new_tokens = 32
        input_ids = [128000, 791, 6864, 315, 9822, 374]  # The capital of France is
        response = requests.post(
            self.base_url + "/generate",
            json={
                "input_ids": input_ids,
                "sampling_params": {
                    # Greedy decoding keeps a single sample deterministic;
                    # parallel sampling needs temperature > 0 to diverge.
                    "temperature": 0 if n == 1 else 0.5,
                    "max_new_tokens": max_new_tokens,
                    "n": n,
                    "stop_token_ids": [119690],
                },
                "stream": False,
                "return_logprob": return_logprob,
                "top_logprobs_num": top_logprobs_num,
                "logprob_start_len": 0,
            },
        )
        ret = response.json()
        print(json.dumps(ret))

        def assert_one_item(item):
            # Token counts must be self-consistent and match the request.
            assert len(item["token_ids"]) == item["meta_info"]["completion_tokens"]
            assert len(item["token_ids"]) == max_new_tokens
            assert item["meta_info"]["prompt_tokens"] == len(input_ids)

            if return_logprob:
                # NOTE: fixed message typo — the original printed a stray
                # literal "f" before the second length ("vs. f{...}").
                assert len(item["meta_info"]["input_token_logprobs"]) == len(
                    input_ids
                ), f'{len(item["meta_info"]["input_token_logprobs"])} vs. {len(input_ids)}'
                assert len(item["meta_info"]["output_token_logprobs"]) == max_new_tokens

        # A single sample comes back as one object; n > 1 returns a list.
        if n == 1:
            assert_one_item(ret)
        else:
            assert len(ret) == n
            for i in range(n):
                assert_one_item(ret[i])

        print("=" * 100)

    def test_simple_decode(self):
        self.run_decode()

    def test_parallel_sample(self):
        self.run_decode(n=3)

    def test_logprob(self):
        for top_logprobs_num in [0, 3]:
            self.run_decode(
                return_logprob=True,
                top_logprobs_num=top_logprobs_num,
            )


# Allow running this file directly: `python test_skip_tokenizer_init.py`.
if __name__ == "__main__":
    unittest.main()