# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile
import unittest

from pytorch_pretrained_bert.tokenization import (BertTokenizer, BasicTokenizer, WordpieceTokenizer,
                                                  _is_whitespace, _is_control, _is_punctuation)


class TokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing", ","
        ]
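        # A BERT vocab file is plain text with one token per line; the
        # zero-based line number becomes the token id.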
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

            vocab_file = vocab_writer.name

        tokenizer = BertTokenizer(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
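
        # A minimal round-trip sketch: convert_ids_to_tokens is assumed here to
        # invert convert_tokens_to_ids for in-vocab tokens.
        self.assertListEqual(
            tokenizer.convert_ids_to_tokens([7, 4, 5, 10, 8, 9]), tokens)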

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        # BasicTokenizer surrounds CJK characters with spaces, so each Chinese
        # character comes out as its own token.
        self.assertListEqual(
            tokenizer.tokenize(u"ah\u535A\u63A8zz"),
            [u"ah", u"\u535A", u"\u63A8", u"zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["hello", "!", "how", "are", "you", "?"])
        # do_lower_case also strips accents, so u"H\u00E9llo" lower-cases to "hello".
        self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing"
        ]

        vocab = {token: i for i, token in enumerate(vocab_tokens)}
        tokenizer = WordpieceTokenizer(vocab=vocab)
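        # WordpieceTokenizer greedily matches the longest vocab entry first and
        # marks continuation pieces with a "##" prefix.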

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(
            tokenizer.tokenize("unwanted running"),
            ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
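
        # Hedged sketch: a word with no matching pieces at all should likewise
        # collapse to the unknown token (assumes the default unk_token "[UNK]").
        self.assertListEqual(tokenizer.tokenize("xyz"), ["[UNK]"])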

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(u" "))
        self.assertTrue(_is_whitespace(u"\t"))
        self.assertTrue(_is_whitespace(u"\r"))
        self.assertTrue(_is_whitespace(u"\n"))
        self.assertTrue(_is_whitespace(u"\u00A0"))  # non-breaking space

        self.assertFalse(_is_whitespace(u"A"))
        self.assertFalse(_is_whitespace(u"-"))

    def test_is_control(self):
        self.assertTrue(_is_control(u"\u0005"))  # ENQ, a C0 control character

        self.assertFalse(_is_control(u"A"))
        self.assertFalse(_is_control(u" "))
        self.assertFalse(_is_control(u"\t"))
        self.assertFalse(_is_control(u"\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation(u"-"))
        self.assertTrue(_is_punctuation(u"$"))
        self.assertTrue(_is_punctuation(u"`"))
        self.assertTrue(_is_punctuation(u"."))

        self.assertFalse(_is_punctuation(u"A"))
        self.assertFalse(_is_punctuation(u" "))


if __name__ == '__main__':
    unittest.main()