import logging
import unittest

from collections import namedtuple
from itertools import takewhile

from tests.utils import require_torch
from transformers import (
    BertTokenizer,
    BertTokenizerFast,
    DistilBertTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    OpenAIGPTTokenizer,
    PreTrainedTokenizer,
    RobertaTokenizer,
    TransfoXLTokenizer,
    is_torch_available,
)
from transformers.tokenization_distilbert import DistilBertTokenizerFast
from transformers.tokenization_openai import OpenAIGPTTokenizerFast
from transformers.tokenization_roberta import RobertaTokenizerFast
from transformers.tokenization_transfo_xl import TransfoXLTokenizerFast


logger = logging.getLogger(__name__)

NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"]
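# Each Tokenizer test case bundles a display name, the fast (Rust-backed) tokenizer class, the slow
# (pure Python) tokenizer class, the key used to look up pretrained vocab files, an optional
# checkpoint filter and optional extra kwargs forwarded to `from_pretrained`.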
Tokenizer = namedtuple("Tokenizer", ["name", "rust_cls", "python_cls", "vocab_key", "filter", "kwargs"])


def filter_non_english(_: Tokenizer, pretrained_name: str):
    """ Filter out checkpoints for non-English languages. """
    return not any(lang in pretrained_name for lang in NON_ENGLISH_TAGS)


def filter_roberta_detectors(_: Tokenizer, pretrained_name: str):
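    """ Filter out the RoBERTa checkpoints whose name contains "detector". """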
    return "detector" not in pretrained_name


class CommonFastTokenizerTest(unittest.TestCase):

    TOKENIZERS_CLASSES = frozenset([])
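    # Populated by subclasses with Tokenizer test cases; the base class itself runs over an empty set.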

    def setUp(self) -> None:
        with open("tests/fixtures/sample_text.txt", encoding="utf-8") as f_data:
            self._data = f_data.read().replace("\n\n", "\n").strip()

    def test_all_tokenizers(self):
        for tok_case in self.TOKENIZERS_CLASSES:
            for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():

                # Tokenizer.filter makes it possible to filter which checkpoints to test, based on all the
                # information available in the Tokenizer (name, rust class, python class, vocab key name).
                if tok_case.filter is None or tok_case.filter(tok_case, pretrained_name):
                    kwargs = dict(t for t in tok_case.kwargs) if tok_case.kwargs else {}
                    with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
                        tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
                        tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, **kwargs)

                        self.fast_align_python(tokenizer_r, tokenizer_p, tok_case, pretrained_name)
                        self.fast_only(tokenizer_r)

    def test_pretokenized_tokenizers(self):
        for tok_case in self.TOKENIZERS_CLASSES:
            for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():

                # Tokenizer.filter makes it possible to filter which checkpoints to test, based on all the
                # information available in the Tokenizer (name, rust class, python class, vocab key name).
                if tok_case.filter is None or tok_case.filter(tok_case, pretrained_name):
                    with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
                        tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, add_prefix_space=True)
                        tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, add_prefix_space=True)

                        self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)

    def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
        # Check is_fast is set correctly
        self.assertFalse(tokenizer_p.is_fast)
        self.assertTrue(tokenizer_r.is_fast)

        # Check that Rust and Python align
        self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
        self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
        self.assert_max_length_equal(tokenizer_r, tokenizer_p)
        self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
        self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
        self.assert_padding(tokenizer_r, tokenizer_p)
        self.assert_create_token_type_ids(tokenizer_r, tokenizer_p)
        # TODO: enable for v3.0.0
        # self.assert_empty_output_no_special_tokens(tokenizer_r, tokenizer_p)

    def fast_only(self, tokenizer_r):
        # Ensure passing None raises an error
        self.assertRaises(ValueError, tokenizer_r.tokenize, None)
        self.assertRaises(ValueError, tokenizer_r.encode, None)
        self.assertRaises(ValueError, tokenizer_r.encode_plus, None)
        self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, None)

        self.assert_add_tokens(tokenizer_r)
        self.assert_offsets_mapping(tokenizer_r)
        self.assert_add_special_tokens(tokenizer_r)
        self.assert_alignement_methods(tokenizer_r)
        self.assert_batch_encode_dynamic_overflowing(tokenizer_r)

    def assert_alignement_methods(self, tokenizer_r):
        words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
        text = " ".join(words)
        batch_size = 3

        encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)

        batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False)
        num_tokens = len(encoding["input_ids"])

        last_word_index = len(words) - 1
        last_token_index = num_tokens - 1
        last_batch_index = batch_size - 1
        last_char_index = len(text) - 1
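
        # Each mapping below is checked on both a single encoding and a batch encoding,
        # at the first and last word / token / character positions.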

        # words, tokens
        self.assertEqual(len(encoding.words(0)), num_tokens)
        self.assertEqual(max(encoding.words(0)), last_word_index)
        self.assertEqual(min(encoding.words(0)), 0)
        self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
        self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
        self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
        self.assertEqual(len(encoding.tokens(0)), num_tokens)

        # Assert token_to_word
        self.assertEqual(encoding.token_to_word(0), 0)
        self.assertEqual(encoding.token_to_word(0, 0), 0)
        self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
        self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
        self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
        self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
        self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)

        # Assert word_to_tokens
        self.assertEqual(encoding.word_to_tokens(0).start, 0)
        self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
        self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
        self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
        self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
        self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
        self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1)

        # Assert token_to_chars
        self.assertEqual(encoding.token_to_chars(0).start, 0)
        self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
        self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
        self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
        self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1)

        # Assert char_to_token
        self.assertEqual(encoding.char_to_token(0), 0)
        self.assertEqual(encoding.char_to_token(0, 0), 0)
        self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
        self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
        self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
        self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
        self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)

        # Assert char_to_word
        self.assertEqual(encoding.char_to_word(0), 0)
        self.assertEqual(encoding.char_to_word(0, 0), 0)
        self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
        self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
        self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
        self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
        self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)

        # Assert word_to_chars
        self.assertEqual(encoding.word_to_chars(0).start, 0)
        self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
        self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
        self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
        self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1)

    def assert_tokenization_python_rust_equals(self, tokenizer_r, tokenizer_p):
        # Ensure basic inputs match
        input_p = tokenizer_p.encode_plus(self._data)
        input_r = tokenizer_r.encode_plus(self._data)

        for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
            self.assertSequenceEqual(input_p[key], input_r[key])

        input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
        input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)

        for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_pairs_p.keys()):
            self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])

        # Ensure truncation matches
        input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
        input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)

        for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
            self.assertSequenceEqual(input_p[key], input_r[key])

        # Ensure truncation with stride matches
        input_p = tokenizer_p.encode_plus(
            self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
        )
        input_r = tokenizer_r.encode_plus(
            self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
        )

        for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
            self.assertSequenceEqual(input_p[key], input_r[key][0])

    def assert_num_special_tokens_to_add_equal(self, tokenizer_r, tokenizer_p):
        # Check we have the same number of added_tokens for both pair and non-pair inputs.
        self.assertEqual(tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False))
        self.assertEqual(tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True))

    def assert_max_length_equal(self, tokenizer_r, tokenizer_p):
        # Check we have the correct max_length for both pair and non-pair inputs.
        self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence)
        self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair)

    def assert_special_tokens_map_equal(self, tokenizer_r, tokenizer_p):
        # Assert the sets of special tokens match.
        self.assertSequenceEqual(
            tokenizer_p.special_tokens_map.items(), tokenizer_r.special_tokens_map.items(),
        )

    def assert_add_tokens(self, tokenizer_r):
        vocab_size = tokenizer_r.vocab_size
        self.assertEqual(tokenizer_r.add_tokens(""), 0)
        self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
        self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
        self.assertEqual(len(tokenizer_r), vocab_size + 3)

        self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
        self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
        self.assertRaises(
            AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
        )
        self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
        self.assertEqual(
            tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
        )
        self.assertEqual(len(tokenizer_r), vocab_size + 8)

    def assert_offsets_mapping(self, tokenizer_r):
        text = "Wonderful no inspiration example with subtoken"
        pair = "Along with an awesome pair"

        # No pair
        tokens_with_offsets = tokenizer_r.encode_plus(
            text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
        )
        added_tokens = tokenizer_r.num_special_tokens_to_add(False)
        offsets = tokens_with_offsets["offset_mapping"]

        # Assert there is the same number of tokens and offsets
        self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

        # Assert exactly `added_tokens` tokens are flagged as special in the mask
        self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

        # Pairs
        tokens_with_offsets = tokenizer_r.encode_plus(
            text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
        )
        added_tokens = tokenizer_r.num_special_tokens_to_add(True)
        offsets = tokens_with_offsets["offset_mapping"]

        # Assert there is the same number of tokens and offsets
        self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

        # Assert exactly `added_tokens` tokens are flagged as special in the mask
        self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

    def assert_batch_encode_dynamic_overflowing(self, tokenizer: PreTrainedTokenizer):
        """
        When calling batch_encode_plus with multiple sequences, it can return a different number of
        overflowing encodings for each sequence:
        [
          Sequence 1: [Encoding 1, Encoding 2],
          Sequence 2: [Encoding 1],
          Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
        ]
        These need to be padded so that they can be represented as a tensor.
        """
        returned_tensor = "pt" if is_torch_available() else "tf"
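
        # Tokenizers without a padding token (or with a negative pad_token_id) cannot pad the
        # overflowing encodings into a rectangular batch, so there is nothing to test for them.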

        if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
            return

        tokens = tokenizer.encode_plus(
            "HuggingFace is solving NLP one commit at a time",
            max_length=6,
            padding=True,
            truncation=True,
            return_tensors=returned_tensor,
            return_overflowing_tokens=True,
        )

        for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
            self.assertEqual(len(tokens[key].shape), 2)

        # Mono sample
        tokens = tokenizer.batch_encode_plus(
            ["HuggingFace is solving NLP one commit at a time"],
            max_length=6,
            padding=True,
            truncation="only_first",
            return_tensors=returned_tensor,
            return_overflowing_tokens=True,
        )

        for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
            self.assertEqual(len(tokens[key].shape), 2)
            self.assertEqual(tokens[key].shape[-1], 6)

        # Multi sample
        tokens = tokenizer.batch_encode_plus(
            ["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
            max_length=6,
            padding=True,
            truncation="only_first",
            return_tensors=returned_tensor,
            return_overflowing_tokens=True,
        )

        for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
            self.assertEqual(len(tokens[key].shape), 2)
            self.assertEqual(tokens[key].shape[-1], 6)

    def assert_pretokenized_inputs(self, tokenizer_r, tokenizer_p):
        # Input string
        pretokenized_input_simple = "This is a sample input".split()
        pretokenized_input_pair = "This is a sample pair".split()
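        # `is_pretokenized=True` tells both tokenizers that the input is already split into words
        # (not into final sub-tokens); fast and slow tokenizers should still produce identical encodings.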

        # Test encode for pretokenized inputs
        output_r = tokenizer_r.encode(pretokenized_input_simple, is_pretokenized=True)
        output_p = tokenizer_p.encode(pretokenized_input_simple, is_pretokenized=True)
        self.assertEqual(output_p, output_r)

        kwargs = {
            "is_pretokenized": True,
            "return_token_type_ids": True,
            "return_attention_mask": True,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": True,
            "return_offsets_mapping": False,  # Not implemented in python tokenizers
        }
        batch_kwargs = {
            "is_pretokenized": True,
            "return_token_type_ids": True,
            "return_attention_mask": True,  # we have an 's' here
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": True,  # we have an 's' here
            "return_offsets_mapping": False,  # Not implemented in python tokenizers
        }
        # Test encode_plus for pretokenized inputs
        output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
        output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
        for key in output_p.keys():
            self.assertEqual(output_p[key], output_r[key])

        # Test batch_encode_plus for pretokenized inputs
        input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
        output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
        output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
        for key in output_p.keys():
            self.assertEqual(output_p[key], output_r[key])

        # Test encode for pretokenized inputs pairs
        output_r = tokenizer_r.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
        output_p = tokenizer_p.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
        self.assertEqual(output_p, output_r)

        # Test encode_plus for pretokenized inputs
        output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
        output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
        for key in output_p.keys():
            self.assertEqual(output_p[key], output_r[key])

        # Test batch_encode_plus for pretokenized inputs
        input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [
            pretokenized_input_simple + pretokenized_input_pair,
            pretokenized_input_pair,
        ]
        output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
        output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
        for key in output_p.keys():
            self.assertEqual(output_p[key], output_r[key])

    def assert_create_token_type_ids(self, tokenizer_r, tokenizer_p):
        input_simple = [1, 2, 3]
        input_pair = [1, 2, 3]

        # Generate output
        output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple)
        output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple)
        self.assertEqual(output_p, output_r)

        # Generate pair output
        output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair)
        output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair)
        self.assertEqual(output_p, output_r)

    def assert_build_inputs_with_special_tokens(self, tokenizer_r, tokenizer_p):
        # Input string
        input_simple = tokenizer_p.tokenize("This is a sample input")
        input_pair = tokenizer_p.tokenize("This is a sample pair")

        # Generate output
        output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
        output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
        self.assertEqual(output_p, output_r)

        # Generate pair output
        output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
        output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
        self.assertEqual(output_p, output_r)

        # Input tokens id
        input_simple = tokenizer_p.encode("This is a sample input")
        input_pair = tokenizer_p.encode("This is a sample pair")

        # Generate output
        output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
        output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
        self.assertEqual(output_p, output_r)

        # Generate pair output
        output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
        output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
        self.assertEqual(output_p, output_r)

    def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
        def assert_padded_input_match(input_r: list, input_p: list, max_length: int):

            # Ensure we match max_length
            self.assertEqual(len(input_r), max_length)
            self.assertEqual(len(input_p), max_length)

            # Ensure the number of padded tokens is the same
            padded_tokens_r = list(takewhile(lambda i: i == tokenizer_r.pad_token_id, reversed(input_r)))
            padded_tokens_p = list(takewhile(lambda i: i == tokenizer_p.pad_token_id, reversed(input_p)))
            self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)

        def assert_batch_padded_input_match(input_r: dict, input_p: dict, max_length: int):
            for i_r in input_r.values():
                self.assertEqual(len(i_r), 2)
                self.assertEqual(len(i_r[0]), max_length)
                self.assertEqual(len(i_r[1]), max_length)

            for i_r, i_p in zip(input_r["input_ids"], input_p["input_ids"]):
                assert_padded_input_match(i_r, i_p, max_length)

            for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
                self.assertSequenceEqual(i_r, i_p)

        # Encode - Simple input
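        # `pad_to_max_length=True` is the legacy equivalent of `padding="max_length"`; both spellings are exercised below.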
        input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
        input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
        assert_padded_input_match(input_r, input_p, max_length)
        input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
        input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
        assert_padded_input_match(input_r, input_p, max_length)

        input_r = tokenizer_r.encode("This is a simple input", padding="longest")
        input_p = tokenizer_p.encode("This is a simple input", padding=True)
        assert_padded_input_match(input_r, input_p, len(input_r))

        # Encode - Pair input
        input_r = tokenizer_r.encode(
            "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
        )
        input_p = tokenizer_p.encode(
            "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
        )
        assert_padded_input_match(input_r, input_p, max_length)
        input_r = tokenizer_r.encode(
            "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
        )
        input_p = tokenizer_p.encode(
            "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
        )
        assert_padded_input_match(input_r, input_p, max_length)
        input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
        input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
        assert_padded_input_match(input_r, input_p, len(input_r))

        # Encode_plus - Simple input
        input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
        input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
        input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
        input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

        input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
        input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))

        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

        # Encode_plus - Pair input
        input_r = tokenizer_r.encode_plus(
            "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
        )
        input_p = tokenizer_p.encode_plus(
            "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
        )
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
        input_r = tokenizer_r.encode_plus(
            "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
        )
        input_p = tokenizer_p.encode_plus(
            "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
        )
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
        input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest")
        input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True)
        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
        self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

        # Batch_encode_plus - Simple input
        input_r = tokenizer_r.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
        )
        input_p = tokenizer_p.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
        )
        assert_batch_padded_input_match(input_r, input_p, max_length)

        input_r = tokenizer_r.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
        )
        input_p = tokenizer_p.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
        )
        assert_batch_padded_input_match(input_r, input_p, max_length)

        input_r = tokenizer_r.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest",
        )
        input_p = tokenizer_p.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True,
        )
        assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))

        input_r = tokenizer_r.batch_encode_plus(
            ["This is a simple input 1", "This is a simple input 2"], padding="longest"
        )
        input_p = tokenizer_p.batch_encode_plus(["This is a simple input 1", "This is a simple input 2"], padding=True)
        assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))

        # Batch_encode_plus - Pair input
        input_r = tokenizer_r.batch_encode_plus(
            [
                ("This is a simple input 1", "This is a simple input 2"),
                ("This is a simple pair 1", "This is a simple pair 2"),
            ],
            max_length=max_length,
            truncation=True,
            padding="max_length",
        )
        input_p = tokenizer_p.batch_encode_plus(
            [
                ("This is a simple input 1", "This is a simple input 2"),
                ("This is a simple pair 1", "This is a simple pair 2"),
            ],
            max_length=max_length,
            truncation=True,
            padding="max_length",
        )
        assert_batch_padded_input_match(input_r, input_p, max_length)

        input_r = tokenizer_r.batch_encode_plus(
            [
                ("This is a simple input 1", "This is a simple input 2"),
                ("This is a simple pair 1", "This is a simple pair 2"),
            ],
            padding=True,
        )
        input_p = tokenizer_p.batch_encode_plus(
            [
                ("This is a simple input 1", "This is a simple input 2"),
                ("This is a simple pair 1", "This is a simple pair 2"),
            ],
            padding="longest",
        )
        assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))

        # Using pad on single examples after tokenization
        input_r = tokenizer_r.encode_plus("This is a input 1")
        input_r = tokenizer_r.pad(input_r)

        input_p = tokenizer_p.encode_plus("This is a input 1")
        input_p = tokenizer_p.pad(input_p)

        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))

        # Using pad on single examples after tokenization
        input_r = tokenizer_r.encode_plus("This is a input 1")
        input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

        input_p = tokenizer_p.encode_plus("This is a input 1")
        input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")

        assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)

        # Using pad after tokenization
        input_r = tokenizer_r.batch_encode_plus(
            ["This is a input 1", "This is a much longer input which should be padded"]
        )
        input_r = tokenizer_r.pad(input_r)

        input_p = tokenizer_p.batch_encode_plus(
            ["This is a input 1", "This is a much longer input which should be padded"]
        )
        input_p = tokenizer_p.pad(input_p)

        assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))

        # Using pad after tokenization
        input_r = tokenizer_r.batch_encode_plus(
            ["This is a input 1", "This is a much longer input which should be padded"]
        )
        input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

        input_p = tokenizer_p.batch_encode_plus(
            ["This is a input 1", "This is a much longer input which should be padded"]
        )
        input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")

        assert_batch_padded_input_match(input_r, input_p, max_length)

    def assert_save_pretrained(self, tokenizer_r, tokenizer_p):
        # Check both tokenizers save the same vocabulary files
        self.assertSequenceEqual(tokenizer_r.save_vocabulary("."), tokenizer_p.save_vocabulary("."))

        # Checks everything loads correctly in the same way
        tokenizer_rp, tokenizer_pp = tokenizer_r.from_pretrained("."), tokenizer_p.from_pretrained(".")

        # Check special tokens are set accordingly on Rust and Python
        for key in tokenizer_pp.special_tokens_map:
            self.assertTrue(hasattr(tokenizer_rp, key))
            # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
            # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

    def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
        sentence = "A, <mask> AllenNLP sentence."
        tokens_r = tokenizer_r.encode_plus(
            sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
        )
        tokens_p = tokenizer_p.encode_plus(
            sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
        )

        for key in tokens_p.keys():
            self.assertEqual(tokens_r[key], tokens_p[key])

        self.assertEqual(sum(tokens_r["token_type_ids"]), 0)
        self.assertEqual(sum(tokens_p["token_type_ids"]), 0)

        tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
        tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
        self.assertSequenceEqual(tokens_r, tokens_p)

    def assert_add_special_tokens(self, tokenizer_r):
        simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
        # pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True)

        for text in ["", " "]:
            # tokenize()
            no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
            with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

            # encode()
            no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

            # encode_plus()
            no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True)
            for key in no_special_tokens.keys():
                self.assertEqual(
                    len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add
                )

            # batch_encode_plus()
            no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
            with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
            for key in no_special_tokens.keys():
                for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                    self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)


class WordPieceFastTokenizerTest(CommonFastTokenizerTest):
    """
    Override all the specific methods to test WordPiece behavior
    """

    TOKENIZERS_CLASSES = frozenset(
        [
            Tokenizer("Bert", BertTokenizerFast, BertTokenizer, "vocab_file", filter_non_english, None),
            Tokenizer(
                "DistilBert", DistilBertTokenizerFast, DistilBertTokenizer, "vocab_file", filter_non_english, None
            ),
        ]
    )

    def fast_only(self, tokenizer_r):
        super().fast_only(tokenizer_r)
        self.assert_offsets_with_special_characters(tokenizer_r)

    def assert_add_special_tokens(self, tokenizer_r):
        super().assert_add_special_tokens(tokenizer_r)

    def assert_offsets_with_special_characters(self, tokenizer_r):
        sentence = "A, naïve [MASK] AllenNLP sentence."
        tokens = tokenizer_r.encode_plus(
            sentence,
            return_attention_mask=False,
            return_token_type_ids=False,
            return_offsets_mapping=True,
            add_special_tokens=True,
        )

        expected_results = [
            ((0, 1), "A"),
            ((1, 2), ","),
            ((3, 8), "naive"),  # BERT normalizes this away
            # Append MASK here after lower-casing
            ((16, 21), "Allen"),
            ((22, 24), "##NL"),
            ((24, 25), "##P"),
            ((26, 34), "sentence"),
            ((35, 36), "."),
        ]

        # Check if the tokenizer is uncased
        if tokenizer_r.init_kwargs.get("do_lower_case"):
            expected_results = [(offset, token.lower()) for (offset, token) in expected_results]

        # Append the special tokens
        expected_results.insert(3, ((9, 15), "[MASK]"))
        expected_results.insert(0, (None, "[CLS]"))
        expected_results.append((None, "[SEP]"))

        self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
        # self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])


class RobertaFastTokenizerTest(CommonFastTokenizerTest):
    TOKENIZERS_CLASSES = frozenset(
        [
            Tokenizer(
                "Roberta",
                RobertaTokenizerFast,
                RobertaTokenizer,
                "vocab_file",
                filter_roberta_detectors,
                (("cls_token", "<s>"),),
            )
        ]
    )
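    # The kwargs above are forwarded to `from_pretrained` (here they pin cls_token to "<s>").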

    def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
        sentence = "A, <mask> AllenNLP sentence."
        tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
        tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

        # Rust correctly handles the space before the mask while python doesn't
        self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
        self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

        # token_type_ids should put 0 everywhere
        self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

        # attention_mask should put 1 everywhere, so sum over length should be 1
        self.assertEqual(
            sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
            sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
        )

        tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
        tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
        self.assertSequenceEqual(tokens_r, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
        self.assertSequenceEqual(tokens_p, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])


class NoPaddingTokenFastTokenizerMatchingTest(CommonFastTokenizerTest):
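    """
    Tokenizers without a padding token (e.g. OpenAI GPT, GPT-2): padding to a max length is expected to raise a ValueError.
    """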
    TOKENIZERS_CLASSES = [
        Tokenizer("OpenAI GPT", OpenAIGPTTokenizerFast, OpenAIGPTTokenizer, "vocab_file", None, None),
        Tokenizer("GPT2", GPT2TokenizerFast, GPT2Tokenizer, "vocab_file", None, [("add_prefix_space", True)]),
    ]

    def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
        # Check is_fast is set correctly
        self.assertFalse(tokenizer_p.is_fast)
        self.assertTrue(tokenizer_r.is_fast)

        # Check that Rust and Python align
        self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
        self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
        self.assert_max_length_equal(tokenizer_r, tokenizer_p)
        self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
        self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
        self.assert_padding(tokenizer_r, tokenizer_p)

        # Tokenizer-specific: reload the rust tokenizer with its extra kwargs before the pretokenized checks
        kwargs = {}
        if tok_case.kwargs is not None:
            kwargs = dict(tok_case.kwargs)
        tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
        self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)

    def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input 1", "This is a simple input 2"),
            ("This is a simple pair 1", "This is a simple pair 2"),
        ]

        # Simple input tests
        self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

        # Simple input
        self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

        # Simple input
        self.assertRaises(
            ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
        )

        # Pair input
        self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

        # Pair input
        self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

        # Pair input
        self.assertRaises(
            ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
        )


class TransfoXLFastTokenizerTest(NoPaddingTokenFastTokenizerMatchingTest):
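    """
    TransfoXL loads its pretrained vocabulary with torch, hence the `@require_torch` markers below.
    """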
    TOKENIZERS_CLASSES = frozenset(
        [Tokenizer("TransfoXL", TransfoXLTokenizerFast, TransfoXLTokenizer, "pretrained_vocab_file", None, None)]
    )

    @require_torch
    def test_all_tokenizers(self):
        super().test_all_tokenizers()

    @require_torch
    def test_pretokenized_tokenizers(self):
        super().test_pretokenized_tokenizers()