"""
python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_parallel_sample
"""

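# This suite exercises the server's --skip-tokenizer-init mode: the server is
# launched without loading a tokenizer, so requests must carry raw input_ids
# and responses come back as raw token_ids instead of decoded text.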
import json
import unittest

import requests

from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestSkipTokenizerInit(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--skip-tokenizer-init"],
        )

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

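    # Sends one /generate request built from raw token ids and validates the
    # sizes of the returned token_ids and logprob metadata.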
    def run_decode(self, return_logprob=False, top_logprobs_num=0, n=1):
        max_new_tokens = 32
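        # These ids encode "The capital of France is"; 128000 appears to be
        # the Llama-3 BOS token, assuming the default small test model uses a
        # Llama-3-style tokenizer.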
        input_ids = [128000, 791, 6864, 315, 9822, 374]  # The capital of France is
        response = requests.post(
            self.base_url + "/generate",
            json={
                "input_ids": input_ids,
                "sampling_params": {
                    "temperature": 0 if n == 1 else 0.5,
                    "max_new_tokens": max_new_tokens,
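                    # The stop token id below (119690) is presumably arbitrary
                    # and never sampled, so each completion runs for the full
                    # max_new_tokens; the length assertions below rely on this.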
                    "n": n,
                    "stop_token_ids": [119690],
                },
                "stream": False,
                "return_logprob": return_logprob,
                "top_logprobs_num": top_logprobs_num,
                "logprob_start_len": 0,
            },
        )
        ret = response.json()
        print(json.dumps(ret))

        def assert_one_item(item):
            self.assertEqual(
                len(item["token_ids"]), item["meta_info"]["completion_tokens"]
            )
            self.assertEqual(len(item["token_ids"]), max_new_tokens)
            assert item["meta_info"]["prompt_tokens"] == len(input_ids)

            if return_logprob:
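                # With logprob_start_len=0, the server returns one logprob per
                # input token in addition to one per generated token.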
                assert len(item["meta_info"]["input_token_logprobs"]) == len(
                    input_ids
                ), f'{len(item["meta_info"]["input_token_logprobs"])} vs. {len(input_ids)}'
                assert len(item["meta_info"]["output_token_logprobs"]) == max_new_tokens

        if n == 1:
            assert_one_item(ret)
        else:
            assert len(ret) == n
            for i in range(n):
                assert_one_item(ret[i])

        print("=" * 100)

    def test_simple_decode(self):
        self.run_decode()

    def test_parallel_sample(self):
        self.run_decode(n=3)

    def test_logprob(self):
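        # Cover both the plain-logprob path and the path that also returns the
        # top-3 candidate logprobs at each position.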
        for top_logprobs_num in [0, 3]:
            self.run_decode(
                return_logprob=True,
                top_logprobs_num=top_logprobs_num,
            )


if __name__ == "__main__":
    unittest.main()