from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state
from collections import OrderedDict
from itertools import product
import torch
import torch.nn as nn
import numpy as np
from torchvision import models
import unittest
import random

from torchvision.models.detection._utils import overwrite_eps


def set_rng_seed(seed):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)


def get_available_classification_models():
    # TODO add a registration mechanism to torchvision.models
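    # keep public factory functions: callables whose names start with a lowercase
    # letter (classes like ResNet and private helpers are filtered out)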
    return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_segmentation_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_detection_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_video_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


# Models that are in torch hub, as well as r3d_18 and the detection models.
# We tried testing all models, but the test was too slow.
#
# If 'unwrapper' is provided it will be called with the scripted model's outputs
# before they are compared to the eager model's outputs. This is useful when the
# model outputs differ between TorchScript and eager mode.
script_test_models = {
    'deeplabv3_resnet50': {},
    'deeplabv3_resnet101': {},
    'mobilenet_v2': {},
    'resnext50_32x4d': {},
    'fcn_resnet50': {},
    'fcn_resnet101': {},
    'googlenet': {
        'unwrapper': lambda x: x.logits
    },
    'densenet121': {},
    'resnet18': {},
    'alexnet': {},
    'shufflenet_v2_x1_0': {},
    'squeezenet1_0': {},
    'vgg11': {},
    'inception_v3': {
        'unwrapper': lambda x: x.logits
    },
    'r3d_18': {},
    "fasterrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "maskrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "keypointrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "retinanet_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    }
}


# The following models exhibit flaky numerics under autocast in the _test_*_model
# harnesses. This may be caused by the harness environment (e.g. the number of
# classes, input initialization via torch.rand) and does not prove autocast is
# unsuitable when training with real data (autocast has been used successfully
# with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the models below, the _test_*_model harnesses skip numerical checks on the
# outputs when autocast is enabled. They still run an autocasted forward pass,
# so they still verify that autocast coverage suffices to prevent dtype errors
# in each model.
autocast_flaky_numerics = (
    "fasterrcnn_resnet50_fpn",
    "inception_v3",
    "keypointrcnn_resnet50_fpn",
    "maskrcnn_resnet50_fpn",
    "resnet101",
    "resnet152",
    "wide_resnet101_2",
    "retinanet_resnet50_fpn",
)


class ModelTester(TestCase):
    def checkModule(self, model, name, args):
        if name not in script_test_models:
            return
        unwrapper = script_test_models[name].get('unwrapper', None)
        return super(ModelTester, self).checkModule(model, args, unwrapper=unwrapper, skip=False)

    def _test_classification_model(self, name, input_shape, dev):
        set_rng_seed(0)
        # passing num_classes other than the default (1000) makes the test stricter
        model = models.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
        self.assertEqual(out.shape[-1], 50)
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
                self.assertEqual(out.shape[-1], 50)

    def _test_segmentation_model(self, name, dev):
        # passing num_classes other than the default (1000) makes the test stricter
        model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (1, 3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))

    def _test_detection_model(self, name, dev):
        set_rng_seed(0)
        kwargs = {}
        if "retinanet" in name:
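            # presumably lowered from the default threshold so the random test
            # input still yields some detections to compare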
            kwargs["score_thresh"] = 0.013
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False, **kwargs)
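        # overwrite_eps resets the eps of every FrozenBatchNorm2d layer; zeroing
        # it presumably keeps the numerics in line with the stored expected outputs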
        if "keypointrcnn" in name or "retinanet" in name:
            overwrite_eps(model, 0.0)
        model.eval().to(device=dev)
        input_shape = (3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)

        def check_out(out):
            self.assertEqual(len(out), 1)

            def subsample_tensor(tensor):
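                # deterministically pick ~num_samples evenly spaced elements so
                # the expected-output files stay small; e.g. 100 elements are
                # sampled at flat indices 4, 9, ..., 99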
                num_elems = tensor.numel()
                num_samples = 20
                if num_elems <= num_samples:
                    return tensor

                flat_tensor = tensor.flatten()
                ith_index = num_elems // num_samples
                return flat_tensor[ith_index - 1::ith_index]

            def compute_mean_std(tensor):
                # can't compute mean of integral tensor
                tensor = tensor.to(torch.double)
                mean = torch.mean(tensor)
                std = torch.std(tensor)
                return {"mean": mean, "std": std}

            if name == "maskrcnn_resnet50_fpn":
                # maskrcnn_resnet50_fpn is numerically unstable across platforms,
                # so for now compare results via their mean and std
                test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)
                # mean values are small, use large prec
                self.assertExpected(test_value, prec=.01, strip_suffix="_" + dev)
            else:
                self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor),
                                    prec=0.01,
                                    strip_suffix="_" + dev)

        check_out(out)

        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        scripted_out = scripted_model(model_input)[1]
        self.assertEqual(scripted_out[0]["boxes"], out[0]["boxes"])
        self.assertEqual(scripted_out[0]["scores"], out[0]["scores"])
        # labels are currently returned as float from the scripted model: needs
        # investigation (the values themselves match)
        self.assertEqual(scripted_out[0]["labels"].to(dtype=torch.long), out[0]["labels"])
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])
        # check_script is skipped here because the model is already compiled above
        # TODO: refactor tests
        # self.check_script(model, name)
        self.checkModule(model, name, ([x],))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(model_input)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    check_out(out)

    def _test_detection_model_validation(self, name):
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        input_shape = (3, 300, 300)
        x = [torch.rand(input_shape)]

        # validate that targets are present in training
        self.assertRaises(ValueError, model, x)

        # validate type
        targets = [{'boxes': 0.}]
        self.assertRaises(ValueError, model, x, targets=targets)

        # validate boxes shape
        for boxes in (torch.rand((4,)), torch.rand((1, 5))):
            targets = [{'boxes': boxes}]
            self.assertRaises(ValueError, model, x, targets=targets)

        # validate that no degenerate boxes are present
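        # boxes are [x1, y1, x2, y2]; the first box below has zero width
        # (x1 == x2) and the second zero height (y1 == y2)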
        boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
        targets = [{'boxes': boxes}]
        self.assertRaises(ValueError, model, x, targets=targets)

    def _test_video_model(self, name, dev):
        # the default input shape is
        # bs * num_channels * clip_len * h * w
        input_shape = (1, 3, 4, 112, 112)
        # test both BasicBlock and Bottleneck
        model = models.video.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.checkModule(model, name, (x,))
        self.assertEqual(out.shape[-1], 50)

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(out.shape[-1], 50)

    def _make_sliced_model(self, model, stop_layer):
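        # return a Sequential of the model's children up to and including stop_layer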
        layers = OrderedDict()
        for name, layer in model.named_children():
            layers[name] = layer
            if name == stop_layer:
                break
        new_model = torch.nn.Sequential(layers)
        return new_model

    def test_memory_efficient_densenet(self):
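        # memory_efficient=True enables gradient checkpointing inside DenseNet;
        # both variants should produce the same outputs given identical weights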
        input_shape = (1, 3, 300, 300)
        x = torch.rand(input_shape)

        for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:
            model1 = models.__dict__[name](num_classes=50, memory_efficient=True)
            params = model1.state_dict()
            num_params = sum([x.numel() for x in model1.parameters()])
            model1.eval()
            out1 = model1(x)
            out1.sum().backward()
            num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None])

            model2 = models.__dict__[name](num_classes=50, memory_efficient=False)
            model2.load_state_dict(params)
            model2.eval()
            out2 = model2(x)

            max_diff = (out1 - out2).abs().max()

            self.assertEqual(num_params, num_grad)
            self.assertLess(max_diff, 1e-5)

    def test_resnet_dilation(self):
        # TODO improve tests to also check that each layer has the right dimensionality
        for i in product([False, True], [False, True], [False, True]):
            model = models.__dict__["resnet50"](replace_stride_with_dilation=i)
            model = self._make_sliced_model(model, stop_layer="layer4")
            model.eval()
            x = torch.rand(1, 3, 224, 224)
            out = model(x)
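            # each True in `i` replaces a stride-2 downsampling with dilation,
            # doubling the output's spatial size relative to the all-strided net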
            f = 2 ** sum(i)
            self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))

    def test_mobilenetv2_residual_setting(self):
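        # each row of the setting is [expansion factor t, output channels c,
        # number of blocks n, stride s], as in the MobileNetV2 paper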
        model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
        model.eval()
        x = torch.rand(1, 3, 224, 224)
        out = model(x)
        self.assertEqual(out.shape[-1], 1000)

    def test_mobilenetv2_norm_layer(self):
        model = models.__dict__["mobilenet_v2"]()
        self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))

        def get_gn(num_channels):
            return nn.GroupNorm(32, num_channels)

        model = models.__dict__["mobilenet_v2"](norm_layer=get_gn)
        self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
        self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules()))

    def test_fasterrcnn_double(self):
        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.double()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, dtype=torch.float64)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        self.assertEqual(len(out), 1)
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])

    def test_googlenet_eval(self):
        m = torch.jit.script(models.googlenet(pretrained=True).eval())
        self.checkModule(m, "googlenet", (torch.rand(1, 3, 224, 224),))

    @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')
    def test_fasterrcnn_switch_devices(self):
        def checkOut(out):
            self.assertEqual(len(out), 1)
            self.assertTrue("boxes" in out[0])
            self.assertTrue("scores" in out[0])
            self.assertTrue("labels" in out[0])

        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.cuda()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, device='cuda')
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)

        checkOut(out)

        with torch.cuda.amp.autocast():
            out = model(model_input)

        checkOut(out)

        # now switch to cpu and make sure it works
        model.cpu()
        x = x.cpu()
        out_cpu = model([x])

        checkOut(out_cpu)

    def test_generalizedrcnn_transform_repr(self):

        min_size, max_size = 224, 299
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]

        t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size,
                                                                max_size=max_size,
                                                                image_mean=image_mean,
                                                                image_std=image_std)

        # Check integrity of object __repr__ attribute
        expected_string = 'GeneralizedRCNNTransform('
        _indent = '\n    '
        expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, image_std)
        expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size)
        expected_string += "mode='bilinear')\n)"
        self.assertEqual(repr(t), expected_string)


_devs = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]


for model_name in get_available_classification_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default argument values to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            input_shape = (1, 3, 224, 224)
            if model_name in ['inception_v3']:
                input_shape = (1, 3, 299, 299)
            self._test_classification_model(model_name, input_shape, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_segmentation_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default argument values to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            self._test_segmentation_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_detection_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default argument values to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            self._test_detection_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

    def do_validation_test(self, model_name=model_name):
        self._test_detection_model_validation(model_name)

    setattr(ModelTester, "test_" + model_name + "_validation", do_validation_test)


for model_name in get_available_video_models():
    for dev in _devs:
        def do_test(self, model_name=model_name, dev=dev):
            self._test_video_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

if __name__ == '__main__':
    unittest.main()