# coding=utf-8
# Copyright 2018 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
isort:skip_file
"""
import os
import pickle
import tempfile
import unittest
from typing import Callable, Optional

import numpy as np

from transformers import (
    BatchEncoding,
    BertTokenizer,
    BertTokenizerFast,
    PreTrainedTokenizer,
    PreTrainedTokenizerFast,
    TensorType,
    TokenSpan,
    is_tokenizers_available,
)
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow


if is_tokenizers_available():
    from tokenizers import Tokenizer
    from tokenizers.models import WordPiece


class TokenizerUtilsTest(unittest.TestCase):
    def check_tokenizer_from_pretrained(self, tokenizer_class):
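        # Shared helper: instantiate the first known checkpoint for the given
        # tokenizer class and sanity-check the instance and its special tokens.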
        s3_models = list(tokenizer_class.max_model_input_sizes.keys())
        for model_name in s3_models[:1]:
            tokenizer = tokenizer_class.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, tokenizer_class)
            self.assertIsInstance(tokenizer, PreTrainedTokenizer)

            for special_tok in tokenizer.all_special_tokens:
                self.assertIsInstance(special_tok, str)
                special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
                self.assertIsInstance(special_tok_id, int)

    def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None):
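        # Shared helper: round-trip a BatchEncoding through pickle. `equal_op`
        # lets callers supply a tensor-aware comparison (e.g. np.array_equal),
        # since assertEqual cannot compare tensors directly.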
        batch_encoding_str = pickle.dumps(be_original)
        self.assertIsNotNone(batch_encoding_str)

        be_restored = pickle.loads(batch_encoding_str)

        # Ensure is_fast is correctly restored
        self.assertEqual(be_restored.is_fast, be_original.is_fast)

        # Fast tokenizers should restore their encodings; slow ones have none
        if be_original.is_fast:
            self.assertIsNotNone(be_restored.encodings)
        else:
            self.assertIsNone(be_restored.encodings)

        # Ensure the stored values round-trip unchanged
        for original_v, restored_v in zip(be_original.values(), be_restored.values()):
            if equal_op:
                self.assertTrue(equal_op(restored_v, original_v))
            else:
                self.assertEqual(restored_v, original_v)

    @slow
    def test_pretrained_tokenizers(self):
        self.check_tokenizer_from_pretrained(GPT2Tokenizer)

    def test_tensor_type_from_str(self):
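        # The short framework strings should map to the TensorType enum members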
        self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW)
        self.assertEqual(TensorType("pt"), TensorType.PYTORCH)
        self.assertEqual(TensorType("np"), TensorType.NUMPY)

    @require_tokenizers
    def test_batch_encoding_pickle(self):
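        # BatchEncoding must stay picklable for both backends, with and
        # without numpy conversion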

        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        # Slow (Python) tokenizer, no tensor conversion
        with self.subTest("BatchEncoding (Python, return_tensors=None)"):
            self.assert_dump_and_restore(tokenizer_p("Small example to encode"))

        with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"):
            self.assert_dump_and_restore(
                tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
            )

        with self.subTest("BatchEncoding (Rust, return_tensors=None)"):
            self.assert_dump_and_restore(tokenizer_r("Small example to encode"))

        with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"):
            self.assert_dump_and_restore(
                tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
            )

    @require_tf
    @require_tokenizers
    def test_batch_encoding_pickle_tf(self):
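        # Same pickle round-trip, this time with TensorFlow tensors attached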
        import tensorflow as tf

        def tf_array_equals(t1, t2):
            return tf.reduce_all(tf.equal(t1, t2))

        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"):
            self.assert_dump_and_restore(
                tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals
            )

        with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"):
            self.assert_dump_and_restore(
                tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals
            )

    @require_torch
    @require_tokenizers
    def test_batch_encoding_pickle_pt(self):
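        # Same pickle round-trip, this time with PyTorch tensors attached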
        import torch

        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"):
            self.assert_dump_and_restore(
                tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
            )

        with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"):
            self.assert_dump_and_restore(
                tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
            )

    @require_tokenizers
    def test_batch_encoding_is_fast(self):
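        # `is_fast` must reflect which backend produced the encoding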
        tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

        with self.subTest("Python Tokenizer"):
            self.assertFalse(tokenizer_p("Small example to_encode").is_fast)

        with self.subTest("Rust Tokenizer"):
            self.assertTrue(tokenizer_r("Small example to_encode").is_fast)

    @require_tokenizers
    def test_batch_encoding_word_to_tokens(self):
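        # "\xad" is a soft hyphen; BERT's normalization drops it, so the
        # middle word should map to no tokens at all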
        tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
        encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True)

        self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2))
        self.assertEqual(encoded.word_to_tokens(1), None)
        self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3))

    def test_batch_encoding_with_labels(self):
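        # Extra keys such as "labels" must be converted alongside the inputs,
        # and `prepend_batch_axis` must turn single samples into batches of one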
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="np")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # Converting the already-converted batch should not emit a warning
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="np")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    @require_torch
    def test_batch_encoding_with_labels_pt(self):
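        # Same label-conversion checks, targeting PyTorch tensors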
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="pt")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # Converting the already-converted batch should not emit a warning
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="pt")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    @require_tf
    def test_batch_encoding_with_labels_tf(self):
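        # Same label-conversion checks, targeting TensorFlow tensors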
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="tf")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # Converting the already-converted batch should not emit a warning
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="tf")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    @require_flax
    def test_batch_encoding_with_labels_jax(self):
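        # Same label-conversion checks, targeting JAX arrays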
        batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
        tensor_batch = batch.convert_to_tensors(tensor_type="jax")
        self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
        self.assertEqual(tensor_batch["labels"].shape, (2,))
        # Converting the already-converted batch should not emit a warning
        with CaptureStderr() as cs:
            tensor_batch = batch.convert_to_tensors(tensor_type="jax")
        self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")

        batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
        tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True)
        self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
        self.assertEqual(tensor_batch["labels"].shape, (1,))

    def test_padding_accepts_tensors(self):
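        # `pad` must accept pre-tensorized features and preserve the tensor type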
        features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

        batch = tokenizer.pad(features, padding=True)
        self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
        batch = tokenizer.pad(features, padding=True, return_tensors="np")
        self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])

    @require_torch
    def test_padding_accepts_tensors_pt(self):
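        # Same padding check, with PyTorch tensors as the input features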
        import torch

        features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}]
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

        batch = tokenizer.pad(features, padding=True)
        self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
        batch = tokenizer.pad(features, padding=True, return_tensors="pt")
        self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
        self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])

    @require_tf
    def test_padding_accepts_tensors_tf(self):
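        # Same padding check, with TensorFlow tensors as the input features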
        import tensorflow as tf

        features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}]
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

        batch = tokenizer.pad(features, padding=True)
        self.assertTrue(isinstance(batch["input_ids"], tf.Tensor))
        self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
        batch = tokenizer.pad(features, padding=True, return_tensors="tf")
        self.assertTrue(isinstance(batch["input_ids"], tf.Tensor))
        self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])

    @require_tokenizers
    def test_instantiation_from_tokenizers(self):
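        # A bare `tokenizers.Tokenizer` object can be wrapped directly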
        bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer)

    @require_tokenizers
    def test_instantiation_from_tokenizers_json_file(self):
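        # ... or reloaded from a serialized tokenizer.json file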
        bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        with tempfile.TemporaryDirectory() as tmpdirname:
            bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json"))
            PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))