"""
python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_parallel_sample
"""
import json
import unittest

import requests

from sglang.srt.utils import kill_child_process
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestSkipTokenizerInit(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
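        # Launch the server with tokenizer initialization skipped, so requests
        # must send raw token IDs and responses return token IDs as well.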
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--skip-tokenizer-init"],
        )

    @classmethod
    def tearDownClass(cls):
        kill_child_process(cls.process.pid, include_self=True)

    def run_decode(self, return_logprob=False, top_logprobs_num=0, n=1):
        max_new_tokens = 32
        input_ids = [128000, 791, 6864, 315, 9822, 374]  # The capital of France is
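        # With --skip-tokenizer-init, /generate accepts "input_ids" instead of a text prompt.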
        response = requests.post(
            self.base_url + "/generate",
            json={
                "input_ids": input_ids,
                "sampling_params": {
                    "temperature": 0 if n == 1 else 0.5,
                    "max_new_tokens": max_new_tokens,
                    "n": n,
                    "stop_token_ids": [119690],
                },
                "stream": False,
                "return_logprob": return_logprob,
                "top_logprobs_num": top_logprobs_num,
                "logprob_start_len": 0,
            },
        )
        ret = response.json()
        print(json.dumps(ret))

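        # Each returned item carries raw token_ids plus meta_info; check that the
        # reported lengths match the request.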
        def assert_one_item(item):
            assert len(item["token_ids"]) == item["meta_info"]["completion_tokens"]
            assert len(item["token_ids"]) == max_new_tokens
            assert item["meta_info"]["prompt_tokens"] == len(input_ids)

            if return_logprob:
                assert len(item["meta_info"]["input_token_logprobs"]) == len(
                    input_ids
                ), f'{len(item["meta_info"]["input_token_logprobs"])} vs. {len(input_ids)}'
                assert len(item["meta_info"]["output_token_logprobs"]) == max_new_tokens

        if n == 1:
            assert_one_item(ret)
        else:
            assert len(ret) == n
            for i in range(n):
                assert_one_item(ret[i])

        print("=" * 100)

    def test_simple_decode(self):
        self.run_decode()

    def test_parallel_sample(self):
        self.run_decode(n=3)

    def test_logprob(self):
        for top_logprobs_num in [0, 3]:
            self.run_decode(
                return_logprob=True,
                top_logprobs_num=top_logprobs_num,
            )


if __name__ == "__main__":
    unittest.main()