"tests/test_modeling_distilbert.py" did not exist on "9d0a11a68c8ec41a36de3bd5f20b5f083ea4c59e"
test_tokenization_roberta.py 6.09 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers.tokenization_roberta import VOCAB_FILES_NAMES, AddedToken, RobertaTokenizer, RobertaTokenizerFast

from .test_tokenization_common import TokenizerTesterMixin
from .utils import slow


class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

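        # Write the toy vocab and merges files into the test's temporary directory so that
        # get_tokenizer() / get_rust_tokenizer() can load them via from_pretrained.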
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = RobertaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

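        # Ids follow each token's position in the toy vocab above; <unk> maps to the last id (19).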
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

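    # Note: not prefixed with `test_`, so unittest does not collect this method automatically.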
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! c茅c茅 herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

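        # encode(..., add_special_tokens=True) should match encoding without special tokens
        # followed by build_inputs_with_special_tokens (checked below).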
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
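        # add_prefix_space=False should leave the first token without the "Ġ" marker;
        # add_prefix_space=True should add it.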
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
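        # With lstrip=True the mask token absorbs the space to its left but not to its right, so the
        # token following "<mask>" keeps its "Ġ" marker only when the raw text has a space after the mask.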
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)