import torch
from common_utils import TestCase
from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
from torchvision.models.detection.image_list import ImageList


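# Tests for the anchor generators used by torchvision's detection models.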
class Tester(TestCase):
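    # The generator should raise a ValueError when its anchor configuration
    # does not match the feature maps passed to the forward call.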
    def test_incorrect_anchors(self):
        incorrect_sizes = ((2, 4, 8), (32, 8))
        incorrect_aspects = (0.5, 1.0)
        anc = AnchorGenerator(incorrect_sizes, incorrect_aspects)
        image1 = torch.randn(3, 800, 800)
        image_list = ImageList(image1, [(800, 800)])
        feature_maps = [torch.randn(1, 50)]
        self.assertRaises(ValueError, anc, image_list, feature_maps)

    def _init_test_anchor_generator(self):
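        # A single feature map with one square (ratio 1) anchor of size 10.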
        anchor_sizes = ((10,),)
        aspect_ratios = ((1,),)
        anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)

        return anchor_generator

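    # SSD-style generator with one extra aspect ratio; the reciprocal
    # ratio 1/2 is added by the generator itself.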
    def _init_test_defaultbox_generator(self):
        aspect_ratios = [[2]]
        dbox_generator = DefaultBoxGenerator(aspect_ratios)

        return dbox_generator

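    # Stand-in for a backbone: a single feature map downscaled by a factor of 5.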
    def get_features(self, images):
        s0, s1 = images.shape[-2:]
        features = [torch.rand(2, 8, s0 // 5, s1 // 5)]
        return features

    def test_anchor_generator(self):
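        # 15x15 inputs produce a 3x3 feature map, i.e. a stride of 5 per cell.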
        images = torch.randn(2, 3, 15, 15)
        features = self.get_features(images)
        image_shapes = [i.shape[-2:] for i in images]
        images = ImageList(images, image_shapes)

        model = self._init_test_anchor_generator()
        model.eval()
        anchors = model(images, features)

        # Estimate the number of target anchors
        grid_sizes = [f.shape[-2:] for f in features]
        num_anchors_estimated = 0
        for sizes, num_anchors_per_loc in zip(grid_sizes, model.num_anchors_per_location()):
            num_anchors_estimated += sizes[0] * sizes[1] * num_anchors_per_loc

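        # One 10x10 box centred at each of the 3x3 grid locations
        # (x, y) with x, y in {0, 5, 10}, in (x1, y1, x2, y2) format.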
        anchors_output = torch.tensor([[-5., -5., 5., 5.],
                                       [0., -5., 10., 5.],
                                       [5., -5., 15., 5.],
                                       [-5., 0., 5., 10.],
                                       [0., 0., 10., 10.],
                                       [5., 0., 15., 10.],
                                       [-5., 5., 5., 15.],
                                       [0., 5., 10., 15.],
                                       [5., 5., 15., 15.]])

        self.assertEqual(num_anchors_estimated, 9)
        self.assertEqual(len(anchors), 2)
        self.assertEqual(tuple(anchors[0].shape), (9, 4))
        self.assertEqual(tuple(anchors[1].shape), (9, 4))
        self.assertEqual(anchors[0], anchors_output)
        self.assertEqual(anchors[1], anchors_output)

    def test_defaultbox_generator(self):
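        # A 1x1 feature map yields four default boxes around the image centre.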
        images = torch.zeros(2, 3, 15, 15)
        features = [torch.zeros(2, 8, 1, 1)]
        image_shapes = [i.shape[-2:] for i in images]
        images = ImageList(images, image_shapes)

        model = self._init_test_defaultbox_generator()
        model.eval()
        dboxes = model(images, features)

        dboxes_output = torch.tensor([
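            # two square boxes (base and intermediate scale), then the 2:1 and 1:2 boxes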
            [6.3750, 6.3750, 8.6250, 8.6250],
            [4.7443, 4.7443, 10.2557, 10.2557],
            [5.9090, 6.7045, 9.0910, 8.2955],
            [6.7045, 5.9090, 8.2955, 9.0910]
        ])

        self.assertEqual(len(dboxes), 2)
        self.assertEqual(tuple(dboxes[0].shape), (4, 4))
        self.assertEqual(tuple(dboxes[1].shape), (4, 4))
        self.assertTrue(dboxes[0].allclose(dboxes_output))
        self.assertTrue(dboxes[1].allclose(dboxes_output))