# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvNext model. """


import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
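        # floats_tensor draws random pixel values in [0, 1) and ids_tensor draws random
        # integer class ids in [0, num_labels); both helpers come from test_modeling_common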
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden state shape: (batch_size, hidden_sizes[-1], H // 32, W // 32)
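        # (stem stride 4, then 3 stage downsamplings of 2 each: 4 * 2**3 = 32)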
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
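        # with out_features=["stage2", "stage3", "stage4"], the first feature map is the
        # stage2 output: stride 8 -> 32 // 8 = 4 spatially, with hidden_sizes[1] channels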
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
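        # with out_features=None the backbone defaults to the last stage only:
        # stride 32 -> 32 // 32 = 1 spatially, with hidden_sizes[-1] channels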
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids,
    inputs_embeds, attention_mask or seq_length.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"image-feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
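        # ConvNextConfig exposes none of the usual text-model attributes (hidden_size,
        # num_attention_heads, num_hidden_layers), so there are no common properties to test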
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
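            # one hidden state per stage, plus the initial stem (patchify) embedding output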
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
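            # hidden_states[0] is the stem output, downsampled by the patchify stride of 4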
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
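

# A minimal sketch of how this suite is typically invoked (assuming a source checkout of
# transformers with the test dependencies installed); tests marked @slow only run when
# RUN_SLOW=1 is set in the environment:
#
#   RUN_SLOW=1 python -m pytest tests/models/convnext/test_modeling_convnext.py -v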