# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BEiT model. """


import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_BACKBONE_MAPPING,
        MODEL_MAPPING,
        BeitBackbone,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[1, 2, 3, 4],
        out_features=["stage1", "stage2", "stage3", "stage4"],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.out_features = out_features
        self.num_labels = num_labels

        # in BEiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
            out_features=self.out_features,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_backbone(self, config, pixel_values, labels, pixel_labels):
        model = BeitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        expected_height = expected_width = self.image_size // config.patch_size
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))

        # verify backbone works with out_features=None
        config.out_features = None
        model = BeitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
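        # the MIM head predicts a visual-vocabulary token for every patch; the [CLS] position is dropped, hence seq_length - 1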
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
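        # the decode head upsamples the patch grid 4x: (image_size // patch_size) * 4 == image_size * 2 here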
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (
            BeitModel,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "image-feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="BEiT does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

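        # BEiT's input embeddings are its patch-embedding module (an nn.Module), not an nn.Embedding as in text models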
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), nn.Module)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [
                *get_values(MODEL_MAPPING),
                *get_values(MODEL_FOR_BACKBONE_MAPPING),
                BeitForMaskedImageModeling,
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class
                in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos: mask all 196 patches (a 224px image with 16px patches gives 14 x 14 = 196)
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

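        # 281 is the ImageNet-1k class "tabby, tabby cat", matching the cats test image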
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
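        # ADE20k has 150 classes; the logits come out at 1/4 of the 640x640 input resolution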
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

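        # the expected values differ slightly across Pillow versions (image resizing behavior changed around Pillow 9.0.0)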
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

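        # without target_sizes, the segmentation map stays at the raw logit resolution (640 / 4 = 160)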
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)


@require_torch
class BeitBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (BeitBackbone,) if is_torch_available() else ()
    config_class = BeitConfig

    def setUp(self):
        self.model_tester = BeitModelTester(self)