# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import unittest

from transformers.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)

from .test_tokenization_common import TokenizerTesterMixin
from .utils import slow


class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertTokenizer
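    # Also exercise the Rust-backed fast tokenizer (BertTokenizerFast) in the shared tests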
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

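        # Minimal WordPiece vocab, written to a temporary file so the tokenizers under test can load it from disk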
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
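        # The expected ids are the positions of these tokens in the vocab written by setUp()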
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
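        # The slow (Python) and fast (Rust) tokenizers should produce identical tokens and ids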
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
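        # Each CJK character is tokenized separately, even without surrounding whitespace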
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
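        # Tokens listed in never_split must be kept intact instead of being split on punctuation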
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

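        # A word that cannot be fully built from vocab pieces maps to the unknown token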
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

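        # bert-base-uncased uses id 101 for [CLS] and 102 for [SEP]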
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]