# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import numpy as np
from datasets import load_dataset

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import SegformerFeatureExtractor


class SegformerFeatureExtractionTester(unittest.TestCase):
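    """Generates the keyword arguments that the tests below use to instantiate SegformerFeatureExtractor."""
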
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=30,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        reduce_labels=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.reduce_labels = reduce_labels

    def prepare_feat_extract_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "reduce_labels": self.reduce_labels,
        }


def prepare_semantic_single_inputs():
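    """Load a single (image, segmentation map) pair from the ADE20k test fixtures."""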
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])

    return image, segmentation_map


def prepare_semantic_batch_inputs():
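    """Load two (image, segmentation map) pairs from the ADE20k test fixtures."""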
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(dataset[0]["file"])
    map1 = Image.open(dataset[1]["file"])
    image2 = Image.open(dataset[2]["file"])
    map2 = Image.open(dataset[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
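    """Exercises SegformerFeatureExtractor on PIL images, NumPy arrays and PyTorch tensors, with and without segmentation maps."""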

    feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None

    def setUp(self):
        self.feature_extract_tester = SegformerFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "do_resize"))
        self.assertTrue(hasattr(feature_extractor, "size"))
        self.assertTrue(hasattr(feature_extractor, "do_normalize"))
        self.assertTrue(hasattr(feature_extractor, "image_mean"))
        self.assertTrue(hasattr(feature_extractor, "image_std"))
        self.assertTrue(hasattr(feature_extractor, "reduce_labels"))

    def test_batch_feature(self):
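        # Overridden to skip the shared batch-feature test, which does not apply to this feature extractor.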
        pass

    def test_call_pil(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

        # Test batched
        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

    def test_call_numpy(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

        # Test batched
        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

    def test_call_pytorch(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

        # Test batched
        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )

    def test_call_segmentation_maps(self):
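        # Verify that segmentation maps are encoded alongside the images, for single and
        # batched inputs, both as dummy tensors and as real PIL images from the fixtures.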
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
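            # dummy segmentation map: all-zero labels with the same spatial size as the image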
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = feature_extractor(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
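        # labels are encoded as long tensors with values in the valid [0, 255] range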
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = feature_extractor(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()

        encoding = feature_extractor(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()

        encoding = feature_extractor(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.feature_extract_tester.num_channels,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.feature_extract_tester.size,
                self.feature_extract_tester.size,
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = feature_extractor(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

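        # with reduce_labels=True, the background label (0) is replaced by 255 (the ignore
        # index) and all remaining labels are decremented by 1, so values may reach 255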
        feature_extractor.reduce_labels = True
        encoding = feature_extractor(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)