# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest
from typing import List

import numpy as np

from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
from transformers.models.layoutlmv2 import LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available


if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor


@require_pytesseract
@require_tokenizers
class LayoutLMv2ProcessorTest(unittest.TestCase):
    tokenizer_class = LayoutLMv2Tokenizer
    rust_tokenizer_class = LayoutLMv2TokenizerFast

    def setUp(self):
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
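        # this tiny WordPiece vocab is written to a temporary directory below, so that both the slow and
        # the fast tokenizer can be loaded from it with from_pretrained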

        image_processor_map = {
            "do_resize": True,
            "size": 224,
            "apply_ocr": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.image_processing_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processing_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(image_processor_map) + "\n")

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]

    def get_image_processor(self, **kwargs):
        return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """

        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        # PIL expects channel-last arrays, so move the channel axis from the first to the last position
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            processor.save_pretrained(self.tmpdirname)
            processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)

            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
            self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))

            self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
            self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = LayoutLMv2Processor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer())
        processor.save_pretrained(self.tmpdirname)

        # slow tokenizer
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)

        processor = LayoutLMv2Processor.from_pretrained(
            self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

        # fast tokenizer
        tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)

        processor = LayoutLMv2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = LayoutLMv2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        # pass some extra arguments that are not used by the processor; the returned keys should still
        # match the model input names
        inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    @slow
    def test_overflowing_tokens(self):
        # In the case of overflowing tokens, test that we still have a 1-to-1 mapping between the images and
        # input_ids (sequences that are too long are broken down into multiple sequences).
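        # (the processor keeps this mapping by repeating each image once per overflowing chunk, using the
        # tokenizer's overflow_to_sample_mapping)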

        from datasets import load_dataset

        # set up
        datasets = load_dataset("nielsr/funsd")
        processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")

        def preprocess_data(examples):
            images = [Image.open(path).convert("RGB") for path in examples["image_path"]]
            words = examples["words"]
            boxes = examples["bboxes"]
            word_labels = examples["ner_tags"]
            encoded_inputs = processor(
                images,
                words,
                boxes=boxes,
                word_labels=word_labels,
                padding="max_length",
                truncation=True,
                return_overflowing_tokens=True,
                stride=50,
                return_offsets_mapping=True,
                return_tensors="pt",
            )
            return encoded_inputs

        train_data = preprocess_data(datasets["train"])

        self.assertEqual(len(train_data["image"]), len(train_data["input_ids"]))


# tests for the different use cases of the processor
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
    @cached_property
    def get_images(self):
        # we verify our implementation on 2 document images from the DocVQA dataset
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image_1 = Image.open(ds[0]["file"]).convert("RGB")
        image_2 = Image.open(ds[1]["file"]).convert("RGB")

        return image_1, image_2

    @cached_property
    def get_tokenizers(self):
        slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
        fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
        return [slow_tokenizer, fast_tokenizer]

    @slow
    def test_processor_case_1(self):
        # case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
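        # with apply_ocr=True (the default), the image processor runs Tesseract on each image and forwards
        # the recognized words and normalized boxes to the tokenizer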

        image_processor = LayoutLMv2ImageProcessor()
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            input_image_proc = image_processor(images[0], return_tensors="pt")
            input_processor = processor(images[0], return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify image
            self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]"  # fmt: skip
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            input_image_proc = image_processor(images, return_tensors="pt")
            input_processor = processor(images, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify images
            self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]"  # fmt: skip
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

    @slow
    def test_processor_case_2(self):
        # case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
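        # with apply_ocr=False, no OCR is run and the caller has to provide the words and boxes themselves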

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            words = ["hello", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")

            # verify keys
            expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
            actual_keys = list(input_processor.keys())
            for key in expected_keys:
                self.assertIn(key, actual_keys)

            # verify input_ids
            expected_decoding = "[CLS] hello world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
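            # [0, 0, 0, 0] is the box used for [CLS] and [1000, 1000, 1000, 1000] the one used for [SEP];
            # "niels" is split into two wordpieces, so its box appears twice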
            expected_bbox = [
                [0, 0, 0, 0],
                [3, 2, 5, 1],
                [6, 7, 4, 2],
                [3, 9, 2, 4],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1000, 1000, 1000, 1000],
            ]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    @slow
    def test_processor_case_3(self):
        # case 3: token classification (training), apply_ocr=False
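        # in addition to words and boxes, word_labels are passed, which the processor turns into
        # token-level labels for training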

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            words = ["weirdly", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            word_labels = [1, 2]
            input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] weirdly world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify labels
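            # special tokens get the ignore index -100; "weirdly" is split into two wordpieces, and only
            # the first one keeps the word label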
            expected_labels = [-100, 1, -100, 2, -100]
            self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)

            # batched
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            word_labels = [[1, 2], [6, 3, 10, 2]]
            input_processor = processor(
                images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
            )

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] my name is niels [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [
                [0, 0, 0, 0],
                [3, 2, 5, 1],
                [6, 7, 4, 2],
                [3, 9, 2, 4],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1000, 1000, 1000, 1000],
            ]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

            # verify labels
            expected_labels = [-100, 6, 3, 10, 2, -100, -100]
            self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)

    @slow
    def test_processor_case_4(self):
        # case 4: visual question answering (inference), apply_ocr=True
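        # the question is encoded as the first sequence, followed by the OCR'd words of the document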

        image_processor = LayoutLMv2ImageProcessor()
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            question = "What's his name?"
            input_processor = processor(images[0], question, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]"  # fmt: skip
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            questions = ["How old is he?", "what's the time"]
            input_processor = processor(
                images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
            )

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]]  # fmt: skip
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    @slow
    def test_processor_case_5(self):
        # case 5: visual question answering (inference), apply_ocr=False
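        # like case 4, but the question is paired with user-provided words and boxes instead of OCR output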

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            question = "What's his name?"
            words = ["hello", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], question, words, boxes, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            questions = ["How old is he?", "what's the time"]
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
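            # only the tail of the second sequence is checked: the last word boxes and the [SEP] box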
            expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)