"benchmark/Cargo.lock" did not exist on "c720555adc4ea192a55c101deafcd96daa8ee6b1"
test_models.py 28.8 KB
Newer Older
import contextlib
import functools
import io
import operator
import os
import pkgutil
import sys
import traceback
import warnings
from collections import OrderedDict

import pytest
import torch
import torch.fx
import torch.nn as nn
from _utils_internal import get_relative_path
from common_utils import map_nested_tensor_object, freeze_rng_state, set_rng_seed, cpu_and_gpu, needs_cuda
from torchvision import models

ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"


def get_models_from_module(module):
    # TODO add a registration mechanism to torchvision.models
    return [v for k, v in module.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
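
# For example, get_models_from_module(models) yields lower-case builders such
# as models.alexnet and models.resnet50 (the exact list depends on the
# installed torchvision version); classes (upper-case names) and private
# helpers (leading underscore) are filtered out.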


@pytest.fixture
def disable_weight_loading(mocker):
    """When testing models, the two slowest operations are the downloading of the weights to a file and loading them
    into the model. Unless, you want to test against specific weights, these steps can be disabled without any
    drawbacks.

    Including this fixture into the signature of your test, i.e. `test_foo(disable_weight_loading)`, will recurse
    through all models in `torchvision.models` and will patch all occurrences of the function
    `download_state_dict_from_url` as well as the method `load_state_dict` on all subclasses of `nn.Module` to be
    no-ops.

    .. warning:

        Loaded models are still executable as normal, but will always have random weights. Make sure to not use this
        fixture if you want to compare the model output against reference values.

    """
    starting_point = models
    function_name = "load_state_dict_from_url"
    method_name = "load_state_dict"

    module_names = {info.name for info in pkgutil.walk_packages(starting_point.__path__, f"{starting_point.__name__}.")}
    targets = {f"torchvision._internally_replaced_utils.{function_name}", f"torch.nn.Module.{method_name}"}
    for name in module_names:
        module = sys.modules.get(name)
        if not module:
            continue

        if function_name in module.__dict__:
            targets.add(f"{module.__name__}.{function_name}")

        targets.update(
            {
                f"{module.__name__}.{obj.__name__}.{method_name}"
                for obj in module.__dict__.values()
                if isinstance(obj, type) and issubclass(obj, nn.Module) and method_name in obj.__dict__
            }
        )

    for target in targets:
        # See https://github.com/pytorch/vision/pull/4867#discussion_r743677802 for details
        with contextlib.suppress(AttributeError):
            mocker.patch(target)
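
# Illustrative usage (a hypothetical test, not part of this suite): opting in
# is just a matter of naming the fixture in the test signature;
# `models.resnet18` stands in for any builder that would normally download
# weights.
#
#     def test_resnet18_smoke(disable_weight_loading):
#         model = models.resnet18(pretrained=True)  # weight loading is a no-op
#         out = model.eval()(torch.rand(1, 3, 224, 224))
#         assert out.shape[-1] == 1000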


def _get_expected_file(name=None):
    # Determine expected file based on environment
    expected_file_base = get_relative_path(os.path.realpath(__file__), "expect")

    # Note: for legacy reasons, the reference file names all have "ModelTester.test_" in them.
    # We hardcode it here to avoid having to regenerate the reference files.
    expected_file = os.path.join(expected_file_base, "ModelTester.test_" + name)
    expected_file += "_expect.pkl"

    if not ACCEPT and not os.path.exists(expected_file):
        raise RuntimeError(
            f"No expect file exists for {os.path.basename(expected_file)} in {expected_file}; "
            "to accept the current output, re-run the failing test after setting the EXPECTTEST_ACCEPT "
            "env variable. For example: EXPECTTEST_ACCEPT=1 pytest test/test_models.py -k alexnet"
        )

    return expected_file


def _assert_expected(output, name, prec):
    """Test that a python value matches the recorded contents of a file
    based on a "check" name. The value must be
    pickable with `torch.save`. This file
    is placed in the 'expect' directory in the same directory
    as the test script. You can automatically update the recorded test
    output using an EXPECTTEST_ACCEPT=1 env variable.
    """
    expected_file = _get_expected_file(name)

    if ACCEPT:
        filename = os.path.basename(expected_file)
        print(f"Accepting updated output for {filename}:\n\n{output}")
        torch.save(output, expected_file)
        MAX_PICKLE_SIZE = 50 * 1000  # 50 KB
        binary_size = os.path.getsize(expected_file)
        if binary_size > MAX_PICKLE_SIZE:
            raise RuntimeError(f"The output for {filename} is larger than 50 KB")
    else:
        expected = torch.load(expected_file)
        rtol = atol = prec
        torch.testing.assert_close(output, expected, rtol=rtol, atol=atol, check_dtype=False)
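
# A minimal sketch of the expect-file workflow (hypothetical test selection):
# running
#
#     EXPECTTEST_ACCEPT=1 pytest test/test_models.py -k alexnet
#
# records expect/ModelTester.test_alexnet_expect.pkl; subsequent calls to
#
#     _assert_expected(out.cpu(), "alexnet", prec=0.1)
#
# then compare against that pickle with rtol = atol = 0.1.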


def _check_jit_scriptable(nn_module, args, unwrapper=None, skip=False):
    """Check that a nn.Module's results in TorchScript match eager and that it can be exported"""

    def assert_export_import_module(m, args):
        """Check that the results of a model are the same after saving and loading"""
        def get_export_import_copy(m):
            """Save and load a TorchScript model"""
            buffer = io.BytesIO()
            torch.jit.save(m, buffer)
            buffer.seek(0)
            imported = torch.jit.load(buffer)
            return imported

        m_import = get_export_import_copy(m)
        with freeze_rng_state():
            results = m(*args)
        with freeze_rng_state():
            results_from_imported = m_import(*args)
        tol = 3e-4
        torch.testing.assert_close(results, results_from_imported, atol=tol, rtol=tol)

    TEST_WITH_SLOW = os.getenv("PYTORCH_TEST_WITH_SLOW", "0") == "1"
    if not TEST_WITH_SLOW or skip:
        # TorchScript checks only run in slow mode; warn so the skip is visible.
        msg = (
            f"The check_jit_scriptable test for {nn_module.__class__.__name__} was skipped. "
            "This test checks if the module's results in TorchScript "
            "match eager and that it can be exported. To run these "
            "tests make sure you set the environment variable "
            "PYTORCH_TEST_WITH_SLOW=1 and that the test is not "
            "manually skipped."
        )
        warnings.warn(msg, RuntimeWarning)
        return None

    sm = torch.jit.script(nn_module)

    with freeze_rng_state():
        eager_out = nn_module(*args)

    with freeze_rng_state():
        script_out = sm(*args)
        if unwrapper:
            script_out = unwrapper(script_out)

    torch.testing.assert_close(eager_out, script_out, atol=1e-4, rtol=1e-4)
    assert_export_import_module(sm, args)


def _check_fx_compatible(model, inputs):
    model_fx = torch.fx.symbolic_trace(model)
    out = model(inputs)
    out_fx = model_fx(inputs)
    torch.testing.assert_close(out, out_fx)


def _check_input_backprop(model, inputs):
    if isinstance(inputs, list):
        requires_grad = list()
        for inp in inputs:
            requires_grad.append(inp.requires_grad)
            inp.requires_grad_(True)
    else:
        requires_grad = inputs.requires_grad
        inputs.requires_grad_(True)

    out = model(inputs)

    if isinstance(out, dict):
        out["out"].sum().backward()
    else:
        if isinstance(out[0], dict):
            out[0]["scores"].sum().backward()
        else:
            out[0].sum().backward()

    if isinstance(inputs, list):
        for i, inp in enumerate(inputs):
            assert inputs[i].grad is not None
            inp.requires_grad_(requires_grad[i])
    else:
        assert inputs.grad is not None
        inputs.requires_grad_(requires_grad)


# If 'unwrapper' is provided it will be called with the script model outputs
# before they are compared to the eager model outputs. This is useful if the
# model outputs are different between TorchScript / Eager mode
script_model_unwrapper = {
    "googlenet": lambda x: x.logits,
    "inception_v3": lambda x: x.logits,
    "fasterrcnn_resnet50_fpn": lambda x: x[1],
    "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
    "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
    "maskrcnn_resnet50_fpn": lambda x: x[1],
    "keypointrcnn_resnet50_fpn": lambda x: x[1],
    "retinanet_resnet50_fpn": lambda x: x[1],
    "ssd300_vgg16": lambda x: x[1],
    "ssdlite320_mobilenet_v3_large": lambda x: x[1],
}
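
# Illustrative (a sketch, not suite code): a scripted googlenet returns a
# GoogLeNetOutputs structure rather than a plain tensor, so its unwrapper
# extracts the logits before the eager/script comparison:
#
#     unwrap = script_model_unwrapper.get("googlenet", None)
#     out = scripted_model(x)
#     if unwrap:
#         out = unwrap(out)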


# The following models exhibit flaky numerics under autocast in _test_*_model harnesses.
# This may be caused by the harness environment (e.g. num classes, input initialization
# via torch.rand), and does not prove autocast is unsuitable when training with real data
# (autocast has been used successfully with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the following models, _test_*_model harnesses skip numerical checks on outputs when
# trying autocast. However, they still try an autocasted forward pass, so they still ensure
# autocast coverage suffices to prevent dtype errors in each model.
autocast_flaky_numerics = (
    "inception_v3",
    "resnet101",
    "resnet152",
    "wide_resnet101_2",
    "deeplabv3_resnet50",
    "deeplabv3_resnet101",
    "deeplabv3_mobilenet_v3_large",
    "fcn_resnet50",
    "fcn_resnet101",
    "lraspp_mobilenet_v3_large",
    "maskrcnn_resnet50_fpn",
)

# The tests for the following quantized models are flaky, possibly due to
# inconsistent rounding errors on different platforms. For this reason the
# input/output consistency checks in test_quantized_classification_model are
# skipped for these models.
quantized_flaky_models = ("inception_v3", "resnet50")


# The following contains per-model configuration parameters used by the
# _test_*_model harnesses.
_model_params = {
    "inception_v3": {"input_shape": (1, 3, 299, 299)},
    "retinanet_resnet50_fpn": {
        "num_classes": 20,
        "score_thresh": 0.01,
        "min_size": 224,
        "max_size": 224,
        "input_shape": (3, 224, 224),
    },
    "keypointrcnn_resnet50_fpn": {
        "num_classes": 2,
        "min_size": 224,
        "max_size": 224,
        "box_score_thresh": 0.15,
        "input_shape": (3, 224, 224),
    },
    "fasterrcnn_resnet50_fpn": {
        "num_classes": 20,
        "min_size": 224,
        "max_size": 224,
        "input_shape": (3, 224, 224),
    },
    "maskrcnn_resnet50_fpn": {
        "num_classes": 10,
        "min_size": 224,
        "max_size": 224,
        "input_shape": (3, 224, 224),
    },
    "fasterrcnn_mobilenet_v3_large_fpn": {
        "box_score_thresh": 0.02076,
    },
    "fasterrcnn_mobilenet_v3_large_320_fpn": {
        "box_score_thresh": 0.02076,
        "rpn_pre_nms_top_n_test": 1000,
        "rpn_post_nms_top_n_test": 1000,
    },
}
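
# Sketch of how the harnesses consume this table (mirrors
# test_classification_model below); per-model overrides win over the harness
# defaults, and "input_shape" is popped before the remaining kwargs reach the
# model builder:
#
#     defaults = {"num_classes": 50, "input_shape": (1, 3, 224, 224)}
#     kwargs = {**defaults, **_model_params.get("inception_v3", {})}
#     input_shape = kwargs.pop("input_shape")  # (1, 3, 299, 299)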


# The following contains configuration and expected values used by tests that
# are model-specific.
_model_tests_values = {
    "retinanet_resnet50_fpn": {
        "max_trainable": 5,
        "n_trn_params_per_layer": [36, 46, 65, 78, 88, 89],
    },
    "keypointrcnn_resnet50_fpn": {
        "max_trainable": 5,
        "n_trn_params_per_layer": [48, 58, 77, 90, 100, 101],
    },
    "fasterrcnn_resnet50_fpn": {
        "max_trainable": 5,
        "n_trn_params_per_layer": [30, 40, 59, 72, 82, 83],
    },
    "maskrcnn_resnet50_fpn": {
        "max_trainable": 5,
        "n_trn_params_per_layer": [42, 52, 71, 84, 94, 95],
    },
    "fasterrcnn_mobilenet_v3_large_fpn": {
        "max_trainable": 6,
        "n_trn_params_per_layer": [22, 23, 44, 70, 91, 97, 100],
    },
    "fasterrcnn_mobilenet_v3_large_320_fpn": {
        "max_trainable": 6,
        "n_trn_params_per_layer": [22, 23, 44, 70, 91, 97, 100],
    },
    "ssd300_vgg16": {
        "max_trainable": 5,
        "n_trn_params_per_layer": [45, 51, 57, 63, 67, 71],
    },
    "ssdlite320_mobilenet_v3_large": {
        "max_trainable": 6,
        "n_trn_params_per_layer": [96, 99, 138, 200, 239, 257, 266],
    },
}


def _make_sliced_model(model, stop_layer):
    layers = OrderedDict()
    for name, layer in model.named_children():
        layers[name] = layer
        if name == stop_layer:
            break
    new_model = torch.nn.Sequential(layers)
    return new_model
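
# Illustrative: slicing resnet50 with stop_layer="layer4" keeps the stem
# (conv1/bn1/relu/maxpool) through layer4 and drops avgpool/fc, so the sliced
# model returns the final (N, 2048, H', W') feature map instead of logits
# (see test_resnet_dilation below).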


@pytest.mark.parametrize("model_fn", [models.densenet121, models.densenet169, models.densenet201, models.densenet161])
def test_memory_efficient_densenet(model_fn):
    input_shape = (1, 3, 300, 300)
    x = torch.rand(input_shape)

    model1 = model_fn(num_classes=50, memory_efficient=True)
    params = model1.state_dict()
    num_params = sum(x.numel() for x in model1.parameters())
    model1.eval()
    out1 = model1(x)
    out1.sum().backward()
    num_grad = sum(x.grad.numel() for x in model1.parameters() if x.grad is not None)

    model2 = model_fn(num_classes=50, memory_efficient=False)
    model2.load_state_dict(params)
    model2.eval()
    out2 = model2(x)

    assert num_params == num_grad
    torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5)

    _check_input_backprop(model1, x)
    _check_input_backprop(model2, x)

@pytest.mark.parametrize("dilate_layer_2", (True, False))
@pytest.mark.parametrize("dilate_layer_3", (True, False))
@pytest.mark.parametrize("dilate_layer_4", (True, False))
def test_resnet_dilation(dilate_layer_2, dilate_layer_3, dilate_layer_4):
    # TODO improve tests to also check that each layer has the right dimensionality
    model = models.resnet50(replace_stride_with_dilation=(dilate_layer_2, dilate_layer_3, dilate_layer_4))
    model = _make_sliced_model(model, stop_layer="layer4")
    model.eval()
    x = torch.rand(1, 3, 224, 224)
    out = model(x)
    f = 2 ** sum((dilate_layer_2, dilate_layer_3, dilate_layer_4))
    assert out.shape == (1, 2048, 7 * f, 7 * f)


def test_mobilenet_v2_residual_setting():
    model = models.mobilenet_v2(inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
    model.eval()
    x = torch.rand(1, 3, 224, 224)
    out = model(x)
    assert out.shape[-1] == 1000


@pytest.mark.parametrize("model_fn", [models.mobilenet_v2, models.mobilenet_v3_large, models.mobilenet_v3_small])
def test_mobilenet_norm_layer(model_fn):
    model = model_fn()
    assert any(isinstance(x, nn.BatchNorm2d) for x in model.modules())

    def get_gn(num_channels):
        return nn.GroupNorm(32, num_channels)

    model = model_fn(norm_layer=get_gn)
    assert not any(isinstance(x, nn.BatchNorm2d) for x in model.modules())
    assert any(isinstance(x, nn.GroupNorm) for x in model.modules())


def test_inception_v3_eval():
    # replacement for models.inception_v3(pretrained=True) that does not download weights
    kwargs = {"transform_input": True, "aux_logits": True, "init_weights": False}
    name = "inception_v3"
    model = models.Inception3(**kwargs)
    model.aux_logits = False
    model.AuxLogits = None
    model = model.eval()
    x = torch.rand(1, 3, 299, 299)
    _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None))
    _check_input_backprop(model, x)


def test_fasterrcnn_double():
    model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
    model.double()
    model.eval()
    input_shape = (3, 300, 300)
    x = torch.rand(input_shape, dtype=torch.float64)
    model_input = [x]
    out = model(model_input)
    assert model_input[0] is x
    assert len(out) == 1
    assert "boxes" in out[0]
    assert "scores" in out[0]
    assert "labels" in out[0]
    _check_input_backprop(model, model_input)


def test_googlenet_eval():
    # replacement for models.googlenet(pretrained=True) that does not download weights
    kwargs = {"transform_input": True, "aux_logits": True, "init_weights": False}
    name = "googlenet"
    model = models.GoogLeNet(**kwargs)
    model.aux_logits = False
    model.aux1 = None
    model.aux2 = None
    model = model.eval()
    x = torch.rand(1, 3, 224, 224)
    _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None))
    _check_input_backprop(model, x)


@needs_cuda
def test_fasterrcnn_switch_devices():
    def check_out(out):
        assert len(out) == 1
        assert "boxes" in out[0]
        assert "scores" in out[0]
        assert "labels" in out[0]

    model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
    model.cuda()
    model.eval()
    input_shape = (3, 300, 300)
    x = torch.rand(input_shape, device="cuda")
    model_input = [x]
    out = model(model_input)
    assert model_input[0] is x

    check_out(out)

    with torch.cuda.amp.autocast():
        out = model(model_input)

    check_out(out)

    _check_input_backprop(model, model_input)

    # now switch to cpu and make sure it works
    model.cpu()
    x = x.cpu()
    out_cpu = model([x])

    check_out(out_cpu)

    _check_input_backprop(model, [x])

def test_generalizedrcnn_transform_repr():
    min_size, max_size = 224, 299
    image_mean = [0.485, 0.456, 0.406]
    image_std = [0.229, 0.224, 0.225]

    t = models.detection.transform.GeneralizedRCNNTransform(
        min_size=min_size, max_size=max_size, image_mean=image_mean, image_std=image_std
    )

    # Check integrity of object __repr__ attribute
    expected_string = "GeneralizedRCNNTransform("
    _indent = "\n    "
    expected_string += f"{_indent}Normalize(mean={image_mean}, std={image_std})"
    expected_string += f"{_indent}Resize(min_size=({min_size},), max_size={max_size}, "
    expected_string += "mode='bilinear')\n)"
    assert t.__repr__() == expected_string


@pytest.mark.parametrize("model_fn", get_models_from_module(models))
@pytest.mark.parametrize("dev", cpu_and_gpu())
def test_classification_model(model_fn, dev):
    set_rng_seed(0)
    defaults = {
        "num_classes": 50,
        "input_shape": (1, 3, 224, 224),
    }
    model_name = model_fn.__name__
    kwargs = {**defaults, **_model_params.get(model_name, {})}
    num_classes = kwargs.get("num_classes")
    input_shape = kwargs.pop("input_shape")

    model = model_fn(**kwargs)
    model.eval().to(device=dev)
    # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
    x = torch.rand(input_shape).to(device=dev)
    out = model(x)
    _assert_expected(out.cpu(), model_name, prec=0.1)
    assert out.shape[-1] == num_classes
    _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None))
    _check_fx_compatible(model, x)

    if dev == torch.device("cuda"):
        with torch.cuda.amp.autocast():
            out = model(x)
            # See autocast_flaky_numerics comment at top of file.
            if model_name not in autocast_flaky_numerics:
                _assert_expected(out.cpu(), model_name, prec=0.1)
            assert out.shape[-1] == 50

    _check_input_backprop(model, x)

@pytest.mark.parametrize("model_fn", get_models_from_module(models.segmentation))
535
@pytest.mark.parametrize("dev", cpu_and_gpu())
536
def test_segmentation_model(model_fn, dev):
Anirudh's avatar
Anirudh committed
537
538
    set_rng_seed(0)
    defaults = {
539
540
541
        "num_classes": 10,
        "pretrained_backbone": False,
        "input_shape": (1, 3, 32, 32),
Anirudh's avatar
Anirudh committed
542
    }
543
    model_name = model_fn.__name__
Anirudh's avatar
Anirudh committed
544
    kwargs = {**defaults, **_model_params.get(model_name, {})}
545
    input_shape = kwargs.pop("input_shape")
Anirudh's avatar
Anirudh committed
546

547
    model = model_fn(**kwargs)
Anirudh's avatar
Anirudh committed
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
    model.eval().to(device=dev)
    # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
    x = torch.rand(input_shape).to(device=dev)
    out = model(x)["out"]

    def check_out(out):
        prec = 0.01
        try:
            # We first try to assert the entire output if possible. This is not
            # only the best way to assert results but also handles the cases
            # where we need to create a new expected result.
            _assert_expected(out.cpu(), model_name, prec=prec)
        except AssertionError:
            # Unfortunately some segmentation models are flaky with autocast
            # so instead of validating the probability scores, check that the class
            # predictions match.
            expected_file = _get_expected_file(model_name)
            expected = torch.load(expected_file)
            torch.testing.assert_close(out.argmax(dim=1), expected.argmax(dim=1), rtol=prec, atol=prec)
            return False  # Partial validation performed

        return True  # Full validation performed

    full_validation = check_out(out)

    _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None))
574
    _check_fx_compatible(model, x)
Anirudh's avatar
Anirudh committed
575
576
577
578
579
580
581
582
583

    if dev == torch.device("cuda"):
        with torch.cuda.amp.autocast():
            out = model(x)["out"]
            # See autocast_flaky_numerics comment at top of file.
            if model_name not in autocast_flaky_numerics:
                full_validation &= check_out(out)

    if not full_validation:
584
        msg = (
585
            f"The output of {test_segmentation_model.__name__} could only be partially validated. "
586
587
            "This is likely due to unit-test flakiness, but you may "
            "want to do additional manual checks if you made "
588
            "significant changes to the codebase."
589
        )
Anirudh's avatar
Anirudh committed
590
591
        warnings.warn(msg, RuntimeWarning)
        pytest.skip(msg)
592

593
594
    _check_input_backprop(model, x)

@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
597
@pytest.mark.parametrize("dev", cpu_and_gpu())
598
def test_detection_model(model_fn, dev):
Anirudh's avatar
Anirudh committed
599
600
    set_rng_seed(0)
    defaults = {
601
602
603
        "num_classes": 50,
        "pretrained_backbone": False,
        "input_shape": (3, 300, 300),
Anirudh's avatar
Anirudh committed
604
    }
605
    model_name = model_fn.__name__
Anirudh's avatar
Anirudh committed
606
    kwargs = {**defaults, **_model_params.get(model_name, {})}
607
    input_shape = kwargs.pop("input_shape")
Anirudh's avatar
Anirudh committed
608

609
    model = model_fn(**kwargs)
    model.eval().to(device=dev)
    # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
    x = torch.rand(input_shape).to(device=dev)
    model_input = [x]
    out = model(model_input)
    assert model_input[0] is x

    def check_out(out):
        assert len(out) == 1

        def compact(tensor):
            size = tensor.size()
            elements_per_sample = functools.reduce(operator.mul, size[1:], 1)
            if elements_per_sample > 30:
                return compute_mean_std(tensor)
            else:
                return subsample_tensor(tensor)

        def subsample_tensor(tensor):
            num_elems = tensor.size(0)
            num_samples = 20
            if num_elems <= num_samples:
                return tensor

            ith_index = num_elems // num_samples
            # e.g. num_elems=100 -> ith_index=5, keeping elements 4, 9, ..., 99
            return tensor[ith_index - 1 :: ith_index]

        def compute_mean_std(tensor):
            # can't compute mean of integral tensor
            tensor = tensor.to(torch.double)
            mean = torch.mean(tensor)
            std = torch.std(tensor)
            return {"mean": mean, "std": std}

        output = map_nested_tensor_object(out, tensor_map_fn=compact)
        prec = 0.01
        try:
            # We first try to assert the entire output if possible. This is not
            # only the best way to assert results but also handles the cases
            # where we need to create a new expected result.
            _assert_expected(output, model_name, prec=prec)
        except AssertionError:
            # Unfortunately detection models are flaky due to the unstable sort
            # in NMS. If matching across all outputs fails, use the same approach
            # as in NMSTester.test_nms_cuda to see if this is caused by duplicate
            # scores.
            expected_file = _get_expected_file(model_name)
            expected = torch.load(expected_file)
            torch.testing.assert_close(
                output[0]["scores"], expected[0]["scores"], rtol=prec, atol=prec, check_device=False, check_dtype=False
            )

            # Note: Fmassa proposed turning off NMS by adapting the threshold
            # and then using the Hungarian algorithm as in DETR to find the
            # best match between output and expected boxes and eliminate some
            # of the flakiness. Worth exploring.
            return False  # Partial validation performed

        return True  # Full validation performed

    full_validation = check_out(out)
    _check_jit_scriptable(model, ([x],), unwrapper=script_model_unwrapper.get(model_name, None))

    if dev == torch.device("cuda"):
        with torch.cuda.amp.autocast():
            out = model(model_input)
            # See autocast_flaky_numerics comment at top of file.
            if model_name not in autocast_flaky_numerics:
                full_validation &= check_out(out)

    if not full_validation:
        msg = (
            f"The output of {test_detection_model.__name__} could only be partially validated. "
            "This is likely due to unit-test flakiness, but you may "
            "want to do additional manual checks if you made "
            "significant changes to the codebase."
        )
        warnings.warn(msg, RuntimeWarning)
        pytest.skip(msg)

    _check_input_backprop(model, model_input)

@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
def test_detection_model_validation(model_fn):
    set_rng_seed(0)
    model = model_fn(num_classes=50, pretrained_backbone=False)
    input_shape = (3, 300, 300)
    x = [torch.rand(input_shape)]

    # validate that targets are present in training
    with pytest.raises(ValueError):
        model(x)

    # validate type
    targets = [{"boxes": 0.0}]
    with pytest.raises(ValueError):
        model(x, targets=targets)

    # validate boxes shape
    for boxes in (torch.rand((4,)), torch.rand((1, 5))):
        targets = [{"boxes": boxes}]
        with pytest.raises(ValueError):
            model(x, targets=targets)

    # validate that no degenerate boxes are present
    boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
    targets = [{"boxes": boxes}]
    with pytest.raises(ValueError):
        model(x, targets=targets)


@pytest.mark.parametrize("model_fn", get_models_from_module(models.video))
723
@pytest.mark.parametrize("dev", cpu_and_gpu())
724
def test_video_model(model_fn, dev):
Anirudh's avatar
Anirudh committed
725
726
727
    # the default input shape is
    # bs * num_channels * clip_len * h *w
    input_shape = (1, 3, 4, 112, 112)
728
    model_name = model_fn.__name__
Anirudh's avatar
Anirudh committed
729
    # test both basicblock and Bottleneck
730
    model = model_fn(num_classes=50)
Anirudh's avatar
Anirudh committed
731
732
733
734
735
    model.eval().to(device=dev)
    # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
    x = torch.rand(input_shape).to(device=dev)
    out = model(x)
    _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None))
736
    _check_fx_compatible(model, x)
    assert out.shape[-1] == 50

    if dev == torch.device("cuda"):
        with torch.cuda.amp.autocast():
            out = model(x)
            assert out.shape[-1] == 50
    _check_input_backprop(model, x)

@pytest.mark.skipif(
    not (
        "fbgemm" in torch.backends.quantized.supported_engines
        and "qnnpack" in torch.backends.quantized.supported_engines
    ),
    reason="This Pytorch Build has not been built with fbgemm and qnnpack",
)
@pytest.mark.parametrize("model_fn", get_models_from_module(models.quantization))
def test_quantized_classification_model(model_fn):
    set_rng_seed(0)
    defaults = {
        "num_classes": 5,
        "input_shape": (1, 3, 224, 224),
        "pretrained": False,
        "quantize": True,
    }
    model_name = model_fn.__name__
    kwargs = {**defaults, **_model_params.get(model_name, {})}
    input_shape = kwargs.pop("input_shape")

    # First check if quantize=True provides models that can run with input data
    model = model_fn(**kwargs)
    model.eval()
    x = torch.rand(input_shape)
    out = model(x)

    if model_name not in quantized_flaky_models:
        _assert_expected(out, model_name + "_quantized", prec=0.1)
        assert out.shape[-1] == 5
        _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None))
        _check_fx_compatible(model, x)
    kwargs["quantize"] = False
780
    for eval_mode in [True, False]:
781
        model = model_fn(**kwargs)
782
783
        if eval_mode:
            model.eval()
784
            model.qconfig = torch.ao.quantization.default_qconfig
785
786
        else:
            model.train()
787
            model.qconfig = torch.ao.quantization.default_qat_qconfig
788
789
790

        model.fuse_model()
        if eval_mode:
791
            torch.ao.quantization.prepare(model, inplace=True)
792
        else:
793
            torch.ao.quantization.prepare_qat(model, inplace=True)
794
795
            model.eval()

796
        torch.ao.quantization.convert(model, inplace=True)
797
798
799
800
801
802
803
804

    try:
        torch.jit.script(model)
    except Exception as e:
        tb = traceback.format_exc()
        raise AssertionError(f"model cannot be scripted. Traceback = {str(tb)}") from e


@pytest.mark.parametrize("model_fn", get_models_from_module(models.detection))
def test_detection_model_trainable_backbone_layers(model_fn, disable_weight_loading):
    model_name = model_fn.__name__
    max_trainable = _model_tests_values[model_name]["max_trainable"]
    n_trainable_params = []
    for trainable_layers in range(0, max_trainable + 1):
        model = model_fn(pretrained=False, pretrained_backbone=True, trainable_backbone_layers=trainable_layers)
        n_trainable_params.append(len([p for p in model.parameters() if p.requires_grad]))
    assert n_trainable_params == _model_tests_values[model_name]["n_trn_params_per_layer"]


if __name__ == "__main__":
    pytest.main([__file__])