# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow ViT model. """


import inspect
import os
import tempfile
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow, tooslow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTFeatureExtractor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

        # in ViT, the expected seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.expected_seq_length = num_patches + 1
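        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, plus the [CLS] token -> 226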

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_length, self.hidden_size)
        )

        # Test with an image of a different size than the one specified in the config;
        # interpolate_pos_encoding=True resizes the position embeddings to the new patch grid.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
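        # the sequence length must be recomputed for the smaller grid,
        # e.g. (15 // 2) ** 2 + 1 = 50 with the defaults above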
        expected_seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, expected_seq_length, self.hidden_size)
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
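        # the logits shape depends only on num_labels, not on the input resolution,
        # so it should be unchanged for the smaller image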
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # ViT does not use inputs_embeds
        pass

    def test_graph_mode_with_inputs_embeds(self):
        # ViT does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # overwrite from common since `encoder_seq_length` and `encoder_key_length` are calculated
    # in a different way than in text models.
    @tooslow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        seq_len = self.model_tester.expected_seq_length

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
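                # num_hidden_layers + 1: the embedding output counts as the first hidden state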

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [seq_len, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        seq_len = self.model_tester.expected_seq_length

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            self.assertEqual(out_len + 1, len(outputs))
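            # the single extra output entry is the hidden_states tuple requested above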

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # ViT has a different seq_length
            seq_length = self.model_tester.expected_seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        inputs = feature_extractor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
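        # google/vit-base-patch16-224 is fine-tuned on ImageNet-1k, hence the 1000-way head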

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)