# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
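"""Tests for tokenization.py: FullTokenizer, BasicTokenizer, WordpieceTokenizer
and the character-classification helpers."""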
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile

import tokenization
import tensorflow as tf


class TokenizationTest(tf.test.TestCase):

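    # End-to-end check: FullTokenizer chains basic (whitespace/punctuation)
    # splitting with wordpiece splitting over a tiny vocab.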
    def test_full_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing", ","
        ]
        with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
            # NamedTemporaryFile opens in binary mode by default, so the vocab
            # must be written as UTF-8 bytes rather than str.
            vocab_writer.write("".join(
                [x + "\n" for x in vocab_tokens]).encode("utf-8"))
            vocab_file = vocab_writer.name
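
        # The tokenizer loads the vocab file at construction time, so the
        # temporary file can be removed immediately afterwards.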
        tokenizer = tokenization.FullTokenizer(vocab_file)
        os.unlink(vocab_file)
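
        # u"\u00E9" is an accented "e": lowercasing plus accent stripping turn
        # "UNwant\u00E9d" into "unwanted" before wordpiece splitting.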
        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
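
        # Ids are the positions in vocab_tokens above:
        # un=7, ##want=4, ##ed=5, ","=10, runn=8, ##ing=9.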
        self.assertAllEqual(
            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
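
    # BasicTokenizer with do_lower_case=True lowercases and strips accents in
    # addition to splitting on whitespace and punctuation.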
    def test_basic_tokenizer_lower(self):
        tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
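
        # Runs of whitespace (including tabs and newlines) are collapsed, and
        # punctuation is split into separate tokens.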
        self.assertAllEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["hello", "!", "how", "are", "you", "?"])
        self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
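
    # With do_lower_case=False, casing is preserved; only whitespace and
    # punctuation splitting applies.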
    def test_basic_tokenizer_no_lower(self):
        tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
        self.assertAllEqual(
            tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"])
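
    # WordpieceTokenizer in isolation: greedy longest-match-first subword
    # splitting against a plain token-to-id vocab dict.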
    def test_wordpiece_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing"
        ]
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
        self.assertAllEqual(tokenizer.tokenize(""), [])
        self.assertAllEqual(
            tokenizer.tokenize("unwanted running"),
            ["un", "##want", "##ed", "runn", "##ing"])
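
        # A word that cannot be fully decomposed into vocab pieces becomes a
        # single [UNK] token; surrounding words are unaffected.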
        self.assertAllEqual(
            tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
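
    # convert_tokens_to_ids is a plain vocab lookup, usable without
    # constructing any tokenizer.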
    def test_convert_tokens_to_ids(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing"
        ]
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        self.assertAllEqual(
            tokenization.convert_tokens_to_ids(
                vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
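
    # _is_whitespace covers ASCII whitespace plus Unicode space separators
    # such as the non-breaking space (\u00A0).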
    def test_is_whitespace(self):
        self.assertTrue(tokenization._is_whitespace(u" "))
        self.assertTrue(tokenization._is_whitespace(u"\t"))
        self.assertTrue(tokenization._is_whitespace(u"\r"))
        self.assertTrue(tokenization._is_whitespace(u"\n"))
        self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
        self.assertFalse(tokenization._is_whitespace(u"A"))
        self.assertFalse(tokenization._is_whitespace(u"-"))
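
    # _is_control matches control characters such as \u0005 (ENQUIRY).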
    def test_is_control(self):
        self.assertTrue(tokenization._is_control(u"\u0005"))
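
        # Tab and carriage return are technically control characters but are
        # counted as whitespace instead.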
        self.assertFalse(tokenization._is_control(u"A"))
        self.assertFalse(tokenization._is_control(u" "))
        self.assertFalse(tokenization._is_control(u"\t"))
        self.assertFalse(tokenization._is_control(u"\r"))
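
    # Non-letter/number ASCII is treated as punctuation, so characters like
    # "$" and "`" count even though Unicode does not class them as punctuation.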
    def test_is_punctuation(self):
        self.assertTrue(tokenization._is_punctuation(u"-"))
        self.assertTrue(tokenization._is_punctuation(u"$"))
        self.assertTrue(tokenization._is_punctuation(u"`"))
        self.assertTrue(tokenization._is_punctuation(u"."))
        self.assertFalse(tokenization._is_punctuation(u"A"))
        self.assertFalse(tokenization._is_punctuation(u" "))


if __name__ == "__main__":
    tf.test.main()