# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
from io import open

from transformers.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)

from .test_tokenization_common import CommonTestCases
from .utils import slow
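# Tests for BertTokenizer, its BasicTokenizer and WordpieceTokenizer
# components, and the character-classification helpers used during
# pre-tokenization.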


class BertTokenizationTest(CommonTestCases.CommonTokenizerTester):

    tokenizer_class = BertTokenizer

    def setUp(self):
        super(BertTokenizationTest, self).setUp()

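        # Write a tiny WordPiece vocabulary to a temp file so the tokenizer
        # can be instantiated locally, without downloading a pretrained vocab.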
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
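        # Round-trip expectation for the common tests: the accent in "\u00e9"
        # is stripped, the text is lower-cased, and decoding yields a space
        # after the comma.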
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
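        # "UNwant\u00e9d" lower-cases and accent-strips to "unwanted", which is
        # split into WordPiece sub-tokens; the ids are each token's position in
        # the toy vocab written by setUp.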
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
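        # BasicTokenizer splits each CJK character into its own token, while
        # the surrounding Latin text ("ah", "zz") stays intact.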

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
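        # With do_lower_case=True, lower-casing also strips accents:
        # "H\u00e9llo" becomes "hello".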

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
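        # With do_lower_case=False the original casing is preserved, but
        # whitespace cleanup and punctuation splitting still apply.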

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {token: i for i, token in enumerate(vocab_tokens)}
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
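        # WordPiece matches greedily, longest-first; a word with any piece
        # missing from the vocab collapses to the unk_token.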

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
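        # "\u00a0" is a non-breaking space (Unicode category Zs), so it counts
        # as whitespace along with the ASCII space characters.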
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
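        # "\u0005" (ENQ) is a control character; whitespace characters are
        # deliberately not classified as control.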
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
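        # ASCII symbols such as "$" and "`" count as punctuation here, on top
        # of the standard Unicode punctuation categories.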
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
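        # In the bert-base-uncased vocab, [CLS] is id 101 and [SEP] is id 102:
        # a single sequence becomes [CLS] A [SEP], a pair [CLS] A [SEP] B [SEP].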

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]