test_tokenization_roberta.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers.tokenization_roberta import VOCAB_FILES_NAMES, RobertaTokenizer

from .test_tokenization_common import TokenizerTesterMixin
from .utils import slow


class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer

    def setUp(self):
        super(RobertaTokenizationTest, self).setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
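        # "\u0120" is the byte-level BPE marker character (Ġ) that encodes a leading space.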
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
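        # BPE merge rules in priority order; the first entry is the version header the tokenizer expects.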
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = RobertaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
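        # add_prefix_space=True prepends a space so the first word maps to a "\u0120"-prefixed BPE token.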
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
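        # Expected ids come from the toy vocab above; the trailing 19 is the <unk> token.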
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! c茅c茅 herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode("sequence builders", add_special_tokens=True)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True
        )

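        # build_inputs_with_special_tokens should reproduce encode(..., add_special_tokens=True):
        # <s> A </s> for a single sequence and <s> A </s></s> B </s> for a pair.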
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode