test_tokenization_bert_japanese.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import unittest

from transformers.testing_utils import custom_tokenizers
from transformers.tokenization_bert import WordpieceTokenizer
from transformers.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    MecabTokenizer,
)

from .test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        super().setUp()

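        # A toy WordPiece vocabulary: word-initial pieces plus "##"-prefixed continuation pieces,
        # written to a temporary vocab file below.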
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

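        # "こんばんは" is not in the vocab, so the WordPiece step splits it into "こん" + "##ばんは".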
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

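        # MeCab (IPADIC) segments the sentence into morphemes and drops the surrounding whitespace.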
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

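    # The unidic tests return early (effectively skipping) when the corresponding dictionary package is not installed.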
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

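        # With do_lower_case=True only the Latin text changes: "iPhone" becomes "iphone"; the Japanese text is unchanged.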
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # The MecabTokenizer constructor raises a RuntimeError when the JUMAN dictionary is not installed at this path.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

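        # With normalize_text=False the input is not NFKC-normalized, so the space before "。" is kept as its own token.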
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

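        # A word that cannot be fully covered by pieces in the vocab (here "こんばんにちは") falls back to [UNK].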
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

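        # Encode both sentences without special tokens, then add them explicitly with build_inputs_with_special_tokens.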
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        super().setUp()

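        # Character-level vocabulary: one entry per character.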
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

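        # With subword_tokenizer_type="character" every character becomes its own token.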
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

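        # "ほ" is not in the vocab, so it is mapped to [UNK].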
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]