from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state
from collections import OrderedDict
from itertools import product
import torch
import torch.nn as nn
import numpy as np
from torchvision import models
import unittest
import traceback
import random


def set_rng_seed(seed):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)


def get_available_classification_models():
    # TODO add a registration mechanism to torchvision.models
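    # model constructors are lowercase public callables (e.g. 'resnet18');
    # classes such as ResNet start uppercase and are filtered out below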
    return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_segmentation_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_detection_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_video_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


# models that are in torch hub, as well as r3d_18 and the detection models.
# we tried testing all models but the test was too slow.
# If 'unwrapper' is provided it will be called with the script model outputs
# before they are compared to the eager model outputs. This is useful if the
# model outputs are different between TorchScript / Eager mode
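# (e.g. the scripted googlenet returns a namedtuple, so its unwrapper extracts
# .logits; the scripted detection models return a (losses, detections) tuple, so
# their unwrappers take element [1])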
script_test_models = {
    'deeplabv3_resnet50': {},
    'deeplabv3_resnet101': {},
    'mobilenet_v2': {},
    'resnext50_32x4d': {},
    'fcn_resnet50': {},
    'fcn_resnet101': {},
    'googlenet': {
        'unwrapper': lambda x: x.logits
    },
    'densenet121': {},
    'resnet18': {},
    'alexnet': {},
    'shufflenet_v2_x1_0': {},
    'squeezenet1_0': {},
    'vgg11': {},
    'inception_v3': {
        'unwrapper': lambda x: x.logits
    },
    'r3d_18': {},
    "fasterrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "maskrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "keypointrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "retinanet_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    }
}


# The following models exhibit flaky numerics under autocast in the _test_*_model
# harnesses. This may be caused by the harness environment (e.g. the number of
# classes, or input initialization via torch.rand), and does not prove that
# autocast is unsuitable when training with real data (autocast has been used
# successfully with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the following models, the _test_*_model harnesses skip numerical checks on
# outputs under autocast, but still run an autocasted forward pass, ensuring that
# autocast coverage is sufficient to prevent dtype errors in each model.
autocast_flaky_numerics = (
    "fasterrcnn_resnet50_fpn",
    "inception_v3",
    "keypointrcnn_resnet50_fpn",
    "maskrcnn_resnet50_fpn",
    "resnet101",
    "resnet152",
    "wide_resnet101_2",
)


class ModelTester(TestCase):
    def checkModule(self, model, name, args):
        if name not in script_test_models:
            return
        unwrapper = script_test_models[name].get('unwrapper', None)
        return super(ModelTester, self).checkModule(model, args, unwrapper=unwrapper, skip=False)

    def _test_classification_model(self, name, input_shape, dev):
        set_rng_seed(0)
        # passing num_classes equal to a number other than 1000 makes the test stricter
        model = models.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
        self.assertEqual(out.shape[-1], 50)
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
                self.assertEqual(out.shape[-1], 50)

    def _test_segmentation_model(self, name, dev):
        # passing num_classes equal to a number other than 1000 makes the test stricter
        model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (1, 3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))

    def _test_detection_model(self, name, dev):
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)

        def check_out(out):
            self.assertEqual(len(out), 1)

            def subsample_tensor(tensor):
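                # compare at most `num_samples` evenly strided elements so the
                # expected-output files stay small for large tensors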
                num_elems = tensor.numel()
                num_samples = 20
                if num_elems <= num_samples:
                    return tensor

                flat_tensor = tensor.flatten()
                ith_index = num_elems // num_samples
                return flat_tensor[ith_index - 1::ith_index]

            def compute_mean_std(tensor):
                # can't compute mean of integral tensor
                tensor = tensor.to(torch.double)
                mean = torch.mean(tensor)
                std = torch.std(tensor)
                return {"mean": mean, "std": std}

            # maskrcnn_resnet50_fpn is numerically unstable across platforms, so for now
            # compare results using their mean and std
            if name == "maskrcnn_resnet50_fpn":
                test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)
                # mean values are small, use large prec
                self.assertExpected(test_value, prec=.01, strip_suffix="_" + dev)
            else:
                self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor),
                                    prec=0.01,
                                    strip_suffix="_" + dev)

        check_out(out)

        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        scripted_out = scripted_model(model_input)[1]
        self.assertEqual(scripted_out[0]["boxes"], out[0]["boxes"])
        self.assertEqual(scripted_out[0]["scores"], out[0]["scores"])
        # labels are currently float in scripted mode: needs investigation (values match)
        self.assertEqual(scripted_out[0]["labels"].to(dtype=torch.long), out[0]["labels"])
        self.assertIn("boxes", out[0])
        self.assertIn("scores", out[0])
        self.assertIn("labels", out[0])
        # don't check script because we are compiling it here:
        # TODO: refactor tests
        # self.check_script(model, name)
        self.checkModule(model, name, ([x],))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(model_input)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    check_out(out)

    def _test_detection_model_validation(self, name):
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        input_shape = (3, 300, 300)
        x = [torch.rand(input_shape)]

        # validate that targets are present in training
        self.assertRaises(ValueError, model, x)

        # validate type
        targets = [{'boxes': 0.}]
        self.assertRaises(ValueError, model, x, targets=targets)

        # validate boxes shape
        for boxes in (torch.rand((4,)), torch.rand((1, 5))):
            targets = [{'boxes': boxes}]
            self.assertRaises(ValueError, model, x, targets=targets)

        # validate that no degenerate boxes are present
        boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
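        # the first box has zero width (x1 == x2), the second zero height (y1 == y2)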
        targets = [{'boxes': boxes}]
        self.assertRaises(ValueError, model, x, targets=targets)

    def _test_video_model(self, name, dev):
        # the default input shape is
        # bs * num_channels * clip_len * h * w
        input_shape = (1, 3, 4, 112, 112)
        # test both BasicBlock and Bottleneck
        model = models.video.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.checkModule(model, name, (x,))
        self.assertEqual(out.shape[-1], 50)

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(out.shape[-1], 50)

    def _make_sliced_model(self, model, stop_layer):
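        # returns a Sequential copy of `model` truncated after `stop_layer`,
        # so intermediate feature-map shapes can be checked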
        layers = OrderedDict()
        for name, layer in model.named_children():
            layers[name] = layer
            if name == stop_layer:
                break
        new_model = torch.nn.Sequential(layers)
        return new_model

    def test_memory_efficient_densenet(self):
        input_shape = (1, 3, 300, 300)
        x = torch.rand(input_shape)

        for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:
            model1 = models.__dict__[name](num_classes=50, memory_efficient=True)
            params = model1.state_dict()
            num_params = sum([x.numel() for x in model1.parameters()])
            model1.eval()
            out1 = model1(x)
            out1.sum().backward()
            num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None])

            model2 = models.__dict__[name](num_classes=50, memory_efficient=False)
            model2.load_state_dict(params)
            model2.eval()
            out2 = model2(x)

            max_diff = (out1 - out2).abs().max()

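            # memory_efficient=True checkpoints the dense layers; verify that every
            # parameter still received a gradient through the recomputation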
            self.assertEqual(num_params, num_grad)
            self.assertLess(max_diff, 1e-5)

    def test_resnet_dilation(self):
        # TODO improve tests to also check that each layer has the right dimensionality
        for i in product([False, True], [False, True], [False, True]):
            model = models.__dict__["resnet50"](replace_stride_with_dilation=i)
            model = self._make_sliced_model(model, stop_layer="layer4")
            model.eval()
            x = torch.rand(1, 3, 224, 224)
            out = model(x)
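            # every True entry replaces a stride-2 downsampling with dilation, keeping
            # that stage's resolution, so the 7x7 baseline scales by 2 ** sum(i)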
            f = 2 ** sum(i)
            self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))

    def test_mobilenetv2_residual_setting(self):
        model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
        model.eval()
        x = torch.rand(1, 3, 224, 224)
        out = model(x)
        self.assertEqual(out.shape[-1], 1000)

    def test_mobilenetv2_norm_layer(self):
        model = models.__dict__["mobilenet_v2"]()
        self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))

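        # norm_layer may be any callable mapping a channel count to a norm module: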
        def get_gn(num_channels):
            return nn.GroupNorm(32, num_channels)

        model = models.__dict__["mobilenet_v2"](norm_layer=get_gn)
        self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
        self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules()))

    def test_fasterrcnn_double(self):
        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.double()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, dtype=torch.float64)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        self.assertEqual(len(out), 1)
        self.assertIn("boxes", out[0])
        self.assertIn("scores", out[0])
        self.assertIn("labels", out[0])

    def test_googlenet_eval(self):
        m = torch.jit.script(models.googlenet(pretrained=True).eval())
        self.checkModule(m, "googlenet", (torch.rand(1, 3, 224, 224),))

    @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')
    def test_fasterrcnn_switch_devices(self):
        def checkOut(out):
            self.assertEqual(len(out), 1)
            self.assertIn("boxes", out[0])
            self.assertIn("scores", out[0])
            self.assertIn("labels", out[0])

        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.cuda()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, device='cuda')
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)

        checkOut(out)

        with torch.cuda.amp.autocast():
            out = model(model_input)

        checkOut(out)

        # now switch to cpu and make sure it works
        model.cpu()
        x = x.cpu()
        out_cpu = model([x])

        checkOut(out_cpu)

    def test_generalizedrcnn_transform_repr(self):

        min_size, max_size = 224, 299
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]

        t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size,
                                                                max_size=max_size,
                                                                image_mean=image_mean,
                                                                image_std=image_std)

        # Check integrity of object __repr__ attribute
        expected_string = 'GeneralizedRCNNTransform('
        _indent = '\n    '
        expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, image_std)
        expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size)
        expected_string += "mode='bilinear')\n)"
        self.assertEqual(t.__repr__(), expected_string)


_devs = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]


for model_name in get_available_classification_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
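        # (default argument values bind the current model_name and dev)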
        def do_test(self, model_name=model_name, dev=dev):
            input_shape = (1, 3, 224, 224)
            if model_name in ['inception_v3']:
                input_shape = (1, 3, 299, 299)
            self._test_classification_model(model_name, input_shape, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_segmentation_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
        def do_test(self, model_name=model_name, dev=dev):
            self._test_segmentation_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_detection_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
        def do_test(self, model_name=model_name, dev=dev):
            self._test_detection_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

    def do_validation_test(self, model_name=model_name):
        self._test_detection_model_validation(model_name)

    setattr(ModelTester, "test_" + model_name + "_validation", do_validation_test)


for model_name in get_available_video_models():
    for dev in _devs:
        def do_test(self, model_name=model_name, dev=dev):
            self._test_video_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

if __name__ == '__main__':
    unittest.main()