from os import path as osp

from mmdet3d.core import Box3DMode, show_result
from mmdet.models.detectors import BaseDetector


class Base3DDetector(BaseDetector):
    """Base class for 3D detectors.

    Extends ``BaseDetector`` with test-time dispatch for point-cloud
    (optionally multi-modal) inputs and a helper to visualize predicted
    3D boxes.
    """

    def forward_test(self, points, img_metas, img=None, **kwargs):
        """Dispatch to ``simple_test`` or ``aug_test`` based on the number
        of test-time augmentations.

        Args:
            points (list[torch.Tensor]): the outer list indicates test-time
                augmentations and inner torch.Tensor should have a shape NxC,
                which contains all points in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch
            img (list[torch.Tensor], optional): the outer
                list indicates test-time augmentations and inner
                torch.Tensor should have a shape NxCxHxW, which contains
                all images in the batch. Defaults to None.

        Returns:
            The result of ``self.simple_test`` when there is a single
            augmentation, otherwise the result of ``self.aug_test``.

        Raises:
            TypeError: If ``points`` or ``img_metas`` is not a list.
            ValueError: If the number of point augmentations differs from
                the number of image metas.
        """
        for var, name in [(points, 'points'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))

        num_augs = len(points)
        if num_augs != len(img_metas):
            raise ValueError(
                'num of augmentations ({}) != num of image meta ({})'.format(
                    len(points), len(img_metas)))
        # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
        samples_per_gpu = len(points[0])
        assert samples_per_gpu == 1

        if num_augs == 1:
            # Wrap a missing image so that img[0] below yields None for
            # point-cloud-only detectors.
            img = [img] if img is None else img
            return self.simple_test(points[0], img_metas[0], img[0], **kwargs)
        else:
            return self.aug_test(points, img_metas, img, **kwargs)

    def forward(self, return_loss=True, **kwargs):
        """Calls either forward_train or forward_test depending on whether
        return_loss=True.

        Note this setting will change the expected inputs. When
        `return_loss=True`, img and img_metas are single-nested (i.e.
        torch.Tensor and list[dict]), and when `return_loss=False`, img and
        img_metas should be double nested (i.e.  list[torch.Tensor],
        list[list[dict]]), with the outer list indicating test time
        augmentations.
        """
        if return_loss:
            return self.forward_train(**kwargs)
        else:
            return self.forward_test(**kwargs)

    def show_results(self, data, result, out_dir):
        """Save a visualization of the predicted 3D boxes over the points.

        Args:
            data (dict): Input data dict containing DataContainer-wrapped
                'points' and 'img_metas' entries (batch size 1 expected,
                matching the restriction in ``forward_test``).
            result (dict): Prediction dict; ``result['pts_bbox']['boxes_3d']``
                holds the predicted boxes.
            out_dir (str): Directory to write the visualization to.
        """
        # Fail fast before touching the (potentially expensive) tensors.
        assert out_dir is not None, 'Expect out_dir, got none.'

        points = data['points'][0]._data[0][0].numpy()
        # Hoist the repeated deep DataContainer access into a local.
        img_meta = data['img_metas'][0]._data[0][0]
        pts_filename = img_meta['pts_filename']
        file_name = osp.split(pts_filename)[-1].split('.')[0]

        pred_bboxes = result['pts_bbox']['boxes_3d'].tensor.numpy()
        # for now we convert points into depth mode
        box_mode = img_meta['box_mode_3d']
        if box_mode != Box3DMode.DEPTH:
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode,
                                            Box3DMode.DEPTH)
        # Shift box z by half of dim 5 (presumably the box height — TODO
        # confirm) before display; the original applied this identically in
        # both branches, so it is hoisted out of the conditional.
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        show_result(points, None, pred_bboxes, out_dir, file_name)