# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import os
import unittest
from io import open

from transformers.tokenization_roberta import VOCAB_FILES_NAMES, RobertaTokenizer

from .tokenization_tests_commons import CommonTestCases
from .utils import slow


class RobertaTokenizationTest(CommonTestCases.CommonTokenizerTester):
    tokenizer_class = RobertaTokenizer

    def setUp(self):
        super(RobertaTokenizationTest, self).setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
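        # "\u0120" is the byte-level BPE marker (Ġ) that GPT-2/RoBERTa use to encode a leading
        # space, so "\u0120low" is the word-initial piece "low".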
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
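        # merges.txt format: a version header followed by one space-separated merge rule per
        # line, ordered by priority; the trailing empty string just adds a final newline.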
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = RobertaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
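        # With add_prefix_space=True both words carry the "\u0120" marker; the toy merges only
        # cover "lower", so " newer" falls back to single characters plus the "er" merge.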
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
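        # Expected ids come from the toy vocab above ("\u0120low" -> 14, "er" -> 15, ...),
        # with the trailing unknown token mapping to 19.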
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

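    # Note: not collected by unittest (the name lacks the `test_` prefix); the expected ids
    # below correspond to the pretrained `roberta-base` vocabulary.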
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! c茅c茅 herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode("sequence builders", add_special_tokens=True)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True
        )

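        # build_inputs_with_special_tokens should match encode(..., add_special_tokens=True):
        # <s> x </s> for a single sequence and <s> a </s></s> b </s> for a pair.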
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode


if __name__ == "__main__":
    unittest.main()