from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state
from collections import OrderedDict
from itertools import product
import torch
import torch.nn as nn
import numpy as np
from torchvision import models
import unittest
import random


def set_rng_seed(seed):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)


def get_available_classification_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_segmentation_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_detection_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


def get_available_video_models():
    # TODO add a registration mechanism to torchvision.models
    return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]


# models that are in torch hub, as well as r3d_18 and the RCNN detection
# models. we tried testing all models but the test was too slow.
# If 'unwrapper' is provided it will be called with the script model outputs
# before they are compared to the eager model outputs. This is useful if the
# model outputs are different between TorchScript / Eager mode
script_test_models = {
    'deeplabv3_resnet50': {},
    'deeplabv3_resnet101': {},
    'mobilenet_v2': {},
    'resnext50_32x4d': {},
    'fcn_resnet50': {},
    'fcn_resnet101': {},
    'googlenet': {
        'unwrapper': lambda x: x.logits
    },
    'densenet121': {},
    'resnet18': {},
    'alexnet': {},
    'shufflenet_v2_x1_0': {},
    'squeezenet1_0': {},
    'vgg11': {},
    'inception_v3': {
        'unwrapper': lambda x: x.logits
    },
    'r3d_18': {},
    "fasterrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "maskrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "keypointrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
}


# The following models exhibit flaky numerics under autocast in _test_*_model harnesses.
# This may be caused by the harness environment (e.g. num classes, input initialization
# via torch.rand), and does not prove autocast is unsuitable when training with real data
# (autocast has been used successfully with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the following models, _test_*_model harnesses skip numerical checks on outputs when
# trying autocast. However, they still try an autocasted forward pass, so they still ensure
# autocast coverage suffices to prevent dtype errors in each model.
autocast_flaky_numerics = (
    "fasterrcnn_resnet50_fpn",
    "inception_v3",
    "keypointrcnn_resnet50_fpn",
    "maskrcnn_resnet50_fpn",
    "resnet101",
    "resnet152",
    "wide_resnet101_2",
)


class ModelTester(TestCase):
    def checkModule(self, model, name, args):
        if name not in script_test_models:
            return
        unwrapper = script_test_models[name].get('unwrapper', None)
        return super().checkModule(model, args, unwrapper=unwrapper, skip=False)

    def _test_classification_model(self, name, input_shape, dev):
        set_rng_seed(0)
        # passing a num_classes other than the default makes the test stricter,
        # since it checks that the argument is actually honored
        model = models.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
        self.assertEqual(out.shape[-1], 50)
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
                self.assertEqual(out.shape[-1], 50)

    def _test_segmentation_model(self, name, dev):
        # passing a num_classes other than the default makes the test stricter,
        # since it checks that the argument is actually honored
        model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (1, 3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))
        self.checkModule(model, name, (x,))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))

    def _test_detection_model(self, name, dev):
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        model_input = [x]
        out = model(model_input)
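        # detection models take a list of images and must not modify it in place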
        self.assertIs(model_input[0], x)

        def check_out(out):
            self.assertEqual(len(out), 1)

            def subsample_tensor(tensor):
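                # keep the expected values small: compare only ~20 evenly
                # spaced elements of each output tensor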
                num_elems = tensor.numel()
                num_samples = 20
                if num_elems <= num_samples:
                    return tensor

                flat_tensor = tensor.flatten()
                ith_index = num_elems // num_samples
                return flat_tensor[ith_index - 1::ith_index]

            def compute_mean_std(tensor):
                # can't compute mean of integral tensor
                tensor = tensor.to(torch.double)
                mean = torch.mean(tensor)
                std = torch.std(tensor)
                return {"mean": mean, "std": std}

            # maskrcnn_resnet50_fpn is numerically unstable across platforms,
            # so for now compare the results via their mean and std
            if name == "maskrcnn_resnet50_fpn":
                test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)
                # the mean values are small, so prec=.01 is a relatively loose tolerance
                self.assertExpected(test_value, prec=.01, strip_suffix="_" + dev)
            else:
                self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor),
                                    prec=0.01,
                                    strip_suffix="_" + dev)

        check_out(out)

        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        scripted_out = scripted_model(model_input)[1]
        self.assertEqual(scripted_out[0]["boxes"], out[0]["boxes"])
        self.assertEqual(scripted_out[0]["scores"], out[0]["scores"])
        # labels currently come back as float from the scripted model: need to
        # investigate (the values match after casting back to long)
        self.assertEqual(scripted_out[0]["labels"].to(dtype=torch.long), out[0]["labels"])
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])
        # don't check script because we are compiling it here:
        # TODO: refactor tests
        # self.check_script(model, name)
        self.checkModule(model, name, ([x],))

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(model_input)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    check_out(out)

    def _test_detection_model_validation(self, name):
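        # in training mode, invalid inputs (missing targets, wrong target
        # types, malformed or degenerate boxes) should raise ValueError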
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        input_shape = (3, 300, 300)
        x = [torch.rand(input_shape)]

        # validate that targets are present in training
        self.assertRaises(ValueError, model, x)

        # validate type
        targets = [{'boxes': 0.}]
        self.assertRaises(ValueError, model, x, targets=targets)

        # validate boxes shape
        for boxes in (torch.rand((4,)), torch.rand((1, 5))):
            targets = [{'boxes': boxes}]
            self.assertRaises(ValueError, model, x, targets=targets)

        # validate that no degenerate boxes are present
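        # (the first box has zero width, the second zero height)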
        boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
        targets = [{'boxes': boxes}]
        self.assertRaises(ValueError, model, x, targets=targets)

    def _test_video_model(self, name, dev):
        # the default input shape is
        # batch_size x num_channels x clip_len x h x w
        input_shape = (1, 3, 4, 112, 112)
        # test both BasicBlock and Bottleneck
        model = models.video.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.checkModule(model, name, (x,))
        self.assertEqual(out.shape[-1], 50)

        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(out.shape[-1], 50)

    def _make_sliced_model(self, model, stop_layer):
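        # returns a Sequential of the model's children up to and including stop_layer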
        layers = OrderedDict()
        for name, layer in model.named_children():
            layers[name] = layer
            if name == stop_layer:
                break
        new_model = torch.nn.Sequential(layers)
        return new_model

    def test_memory_efficient_densenet(self):
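        # memory_efficient=True enables gradient checkpointing in DenseNet; with
        # the same weights, both variants should produce nearly identical outputs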
        input_shape = (1, 3, 300, 300)
        x = torch.rand(input_shape)

        for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:
            model1 = models.__dict__[name](num_classes=50, memory_efficient=True)
            params = model1.state_dict()
            num_params = sum(x.numel() for x in model1.parameters())
            model1.eval()
            out1 = model1(x)
            out1.sum().backward()
            num_grad = sum(x.grad.numel() for x in model1.parameters() if x.grad is not None)

            model2 = models.__dict__[name](num_classes=50, memory_efficient=False)
            model2.load_state_dict(params)
            model2.eval()
            out2 = model2(x)

            max_diff = (out1 - out2).abs().max()

            self.assertEqual(num_params, num_grad)
            self.assertLess(max_diff, 1e-5)

    def test_resnet_dilation(self):
        # TODO improve tests to also check that each layer has the right dimensionality
        for i in product([False, True], repeat=3):
            model = models.__dict__["resnet50"](replace_stride_with_dilation=i)
            model = self._make_sliced_model(model, stop_layer="layer4")
            model.eval()
            x = torch.rand(1, 3, 224, 224)
            out = model(x)
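            # each replaced stride skips one 2x downsampling, so the output
            # resolution doubles per dilated layer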
            f = 2 ** sum(i)
            self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))

    def test_mobilenetv2_residual_setting(self):
        model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
        model.eval()
        x = torch.rand(1, 3, 224, 224)
        out = model(x)
        self.assertEqual(out.shape[-1], 1000)

    def test_mobilenetv2_norm_layer(self):
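        # a custom norm_layer factory should replace every BatchNorm2d in the model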
        model = models.__dict__["mobilenet_v2"]()
        self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))

        def get_gn(num_channels):
            return nn.GroupNorm(32, num_channels)

        model = models.__dict__["mobilenet_v2"](norm_layer=get_gn)
        self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
        self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules()))

    def test_fasterrcnn_double(self):
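        # smoke test: the full detection pipeline should also run in float64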
        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.double()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, dtype=torch.float64)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        self.assertEqual(len(out), 1)
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])

    def test_googlenet_eval(self):
        m = torch.jit.script(models.googlenet(pretrained=True).eval())
        self.checkModule(m, "googlenet", (torch.rand(1, 3, 224, 224),))

    @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')
    def test_fasterrcnn_switch_devices(self):
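        # run inference on CUDA (with and without autocast), then move the same
        # model to CPU and check that it still works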
        def checkOut(out):
            self.assertEqual(len(out), 1)
            self.assertTrue("boxes" in out[0])
            self.assertTrue("scores" in out[0])
            self.assertTrue("labels" in out[0])

        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.cuda()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, device='cuda')
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)

        checkOut(out)

        with torch.cuda.amp.autocast():
            out = model(model_input)

        checkOut(out)

        # now switch to cpu and make sure it works
        model.cpu()
        x = x.cpu()
        out_cpu = model([x])

        checkOut(out_cpu)

    def test_generalizedrcnn_transform_repr(self):
        min_size, max_size = 224, 299
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]

        t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size,
                                                                max_size=max_size,
                                                                image_mean=image_mean,
                                                                image_std=image_std)

        # Check integrity of object __repr__ attribute
        expected_string = 'GeneralizedRCNNTransform('
        _indent = '\n    '
        expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, image_std)
        expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size)
        expected_string += "mode='bilinear')\n)"
        self.assertEqual(repr(t), expected_string)


_devs = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]


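# generate one test method per (model, device) pair and attach it to
# ModelTester; unittest discovers them via the "test_" name prefix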
for model_name in get_available_classification_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default arguments to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            input_shape = (1, 3, 224, 224)
            if model_name in ['inception_v3']:
                input_shape = (1, 3, 299, 299)
            self._test_classification_model(model_name, input_shape, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_segmentation_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default arguments to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            self._test_segmentation_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_detection_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so bind the loop variables as
        # default arguments to capture their current values
        def do_test(self, model_name=model_name, dev=dev):
            self._test_detection_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

    def do_validation_test(self, model_name=model_name):
        self._test_detection_model_validation(model_name)

    setattr(ModelTester, "test_" + model_name + "_validation", do_validation_test)


for model_name in get_available_video_models():
    for dev in _devs:
        def do_test(self, model_name=model_name, dev=dev):
            self._test_video_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

if __name__ == '__main__':
    unittest.main()