# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers.testing_utils import slow
from transformers.tokenization_roberta import VOCAB_FILES_NAMES, AddedToken, RobertaTokenizer, RobertaTokenizerFast
Aymeric Augustin's avatar
Aymeric Augustin committed
23

from .test_tokenization_common import TokenizerTesterMixin


class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
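        # "\u0120" is the printable byte-level symbol (Ġ) that GPT-2/RoBERTa BPE uses
        # to mark a token that begins with a space.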
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
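        # Expected ids are the indices of each token in the toy vocab above;
        # the appended unknown token maps to "<unk>" (id 19).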
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! c茅c茅 herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
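        # encoded[0] is the <s> bos token, so the first real token is at index 1.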
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
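        # With lstrip=True, <mask> absorbs the space on its left, so the token that
        # follows the mask starts with the space symbol only when the input text
        # itself has a space after "<mask>".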
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)