"git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "5e19b159b02ae1b519ce6c3a0d1e7ff04ab9fcd5"
tokenization_test.py 4.36 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest

import tokenization


class TokenizationTest(unittest.TestCase):
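    """Tests for the tokenizers and character-class helpers in the tokenization module."""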

    def test_full_tokenizer(self):
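        """Round trip through FullTokenizer: write a temporary vocab file,
        tokenize a string with mixed case, an accent, and punctuation, then
        convert the tokens to their vocabulary ids."""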
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing", ","
        ]
        with open("/tmp/bert_tokenizer_test.txt", "w") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

            vocab_file = vocab_writer.name

        tokenizer = tokenization.FullTokenizer(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_basic_tokenizer_lower(self):
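        """BasicTokenizer with do_lower_case=True lowercases, strips accents,
        and splits on whitespace and punctuation."""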
        tokenizer = tokenization.BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
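        """BasicTokenizer with do_lower_case=False splits the same way but
        preserves the original casing."""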
        tokenizer = tokenization.BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_wordpiece_tokenizer(self):
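        """WordpieceTokenizer splits in-vocabulary words into sub-tokens,
        returns an empty list for an empty string, and emits [UNK] for
        words it cannot decompose."""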
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing"
        ]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(
            tokenizer.tokenize("unwanted running"),
            ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_convert_tokens_to_ids(self):
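        """convert_tokens_to_ids maps each token to its index in the vocab dict."""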
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing"
        ]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i

        self.assertListEqual(
            tokenization.convert_tokens_to_ids(
                vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])

    def test_is_whitespace(self):
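        """Spaces, tabs, carriage returns, newlines, and non-breaking spaces
        count as whitespace; letters and punctuation do not."""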
        self.assertTrue(tokenization._is_whitespace(u" "))
        self.assertTrue(tokenization._is_whitespace(u"\t"))
        self.assertTrue(tokenization._is_whitespace(u"\r"))
        self.assertTrue(tokenization._is_whitespace(u"\n"))
        self.assertTrue(tokenization._is_whitespace(u"\u00A0"))

        self.assertFalse(tokenization._is_whitespace(u"A"))
        self.assertFalse(tokenization._is_whitespace(u"-"))

    def test_is_control(self):
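        """Control characters (e.g. U+0005) are detected; letters and
        whitespace characters are not treated as control characters."""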
        self.assertTrue(tokenization._is_control(u"\u0005"))

        self.assertFalse(tokenization._is_control(u"A"))
        self.assertFalse(tokenization._is_control(u" "))
        self.assertFalse(tokenization._is_control(u"\t"))
        self.assertFalse(tokenization._is_control(u"\r"))

    def test_is_punctuation(self):
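        """ASCII punctuation characters are detected; letters and spaces are not."""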
        self.assertTrue(tokenization._is_punctuation(u"-"))
        self.assertTrue(tokenization._is_punctuation(u"$"))
        self.assertTrue(tokenization._is_punctuation(u"`"))
        self.assertTrue(tokenization._is_punctuation(u"."))

        self.assertFalse(tokenization._is_punctuation(u"A"))
        self.assertFalse(tokenization._is_punctuation(u" "))


if __name__ == "__main__":
    unittest.main()