# test_tokenizer.py
import pytest
from transformers import AutoTokenizer, PreTrainedTokenizerBase

from vllm.lora.request import LoRARequest
from vllm.transformers_utils.tokenizer import (TokenizerGroup,
                                               get_lora_tokenizer)

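# These tests rely on a `sql_lora_files` pytest fixture provided by the
# suite's conftest.py (not shown here). A minimal sketch of such a fixture,
# assuming the adapter is pulled from the Hugging Face Hub (the repo id is
# an assumption, not part of this file):
#
#     @pytest.fixture(scope="session")
#     def sql_lora_files():
#         from huggingface_hub import snapshot_download
#         return snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test")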

@pytest.mark.asyncio
async def test_transformers_tokenizer():
    """With LoRA disabled, TokenizerGroup should produce encodings
    identical to the reference HF tokenizer, on both sync and async paths."""
    reference_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer = TokenizerGroup(
        tokenizer_id="gpt2",
        enable_lora=False,
        max_num_seqs=1,
        max_input_length=None,
    )
    assert reference_tokenizer.encode("prompt") == tokenizer.encode(
        request_id="request_id", prompt="prompt", lora_request=None)
    assert reference_tokenizer.encode(
        "prompt") == await tokenizer.encode_async(request_id="request_id",
                                                  prompt="prompt",
                                                  lora_request=None)
    assert isinstance(tokenizer.get_lora_tokenizer(None),
                      PreTrainedTokenizerBase)
    assert tokenizer.get_lora_tokenizer(
        None) == await tokenizer.get_lora_tokenizer_async(None)


@pytest.mark.asyncio
async def test_transformers_tokenizer_lora(sql_lora_files):
    """With LoRA enabled, requests carrying a LoRARequest should be
    tokenized with the adapter's own tokenizer rather than the base one."""
    reference_tokenizer = AutoTokenizer.from_pretrained(sql_lora_files)
    tokenizer = TokenizerGroup(
        tokenizer_id="gpt2",
        enable_lora=True,
        max_num_seqs=1,
        max_input_length=None,
    )
    lora_request = LoRARequest("1", 1, sql_lora_files)
    assert reference_tokenizer.encode("prompt") == tokenizer.encode(
        request_id="request_id", prompt="prompt", lora_request=lora_request)
    assert reference_tokenizer.encode(
        "prompt") == await tokenizer.encode_async(request_id="request_id",
                                                  prompt="prompt",
                                                  lora_request=lora_request)
    assert isinstance(tokenizer.get_lora_tokenizer(None),
                      PreTrainedTokenizerBase)
    assert tokenizer.get_lora_tokenizer(
        None) == await tokenizer.get_lora_tokenizer_async(None)

    # The adapter tokenizer is a real tokenizer, distinct from the base
    # tokenizer, and the sync and async lookups agree on it.
    assert isinstance(tokenizer.get_lora_tokenizer(lora_request),
                      PreTrainedTokenizerBase)
    assert tokenizer.get_lora_tokenizer(
        lora_request) != tokenizer.get_lora_tokenizer(None)
    assert tokenizer.get_lora_tokenizer(
        lora_request) == await tokenizer.get_lora_tokenizer_async(lora_request)


def test_get_lora_tokenizer(sql_lora_files, tmpdir):
    """get_lora_tokenizer should return a tokenizer only when the request
    points at a directory that actually contains one."""
    # No LoRA request -> no tokenizer.
    lora_request = None
    tokenizer = get_lora_tokenizer(lora_request)
    assert not tokenizer

    # A valid adapter ships its own tokenizer, which carries added vocab.
    lora_request = LoRARequest("1", 1, sql_lora_files)
    tokenizer = get_lora_tokenizer(lora_request)
    assert tokenizer.get_added_vocab()

    # An empty directory has no tokenizer files, so nothing is returned.
    lora_request = LoRARequest("1", 1, str(tmpdir))
    tokenizer = get_lora_tokenizer(lora_request)
    assert not tokenizer
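
# Assuming the usual vLLM test layout, these can be run with, e.g.:
#     pytest tests/lora/test_tokenizer.py -v
# (pytest-asyncio is required for the @pytest.mark.asyncio tests above.)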