import numpy as np
from collections import OrderedDict
from os import path as osp

from mmdet3d.core import show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.core import eval_map
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset


@DATASETS.register_module()
class SUNRGBDDataset(Custom3DDataset):
    r"""SUNRGBD Dataset.

    This class serves as the API for experiments on the SUNRGBD Dataset.

    See the `download page <http://rgbd.cs.princeton.edu/challenge.html>`_
    for data downloading.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'Depth' in this dataset. Available options include

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
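
    Example:
        A minimal construction sketch; the paths below are placeholders for
        illustration and may not match a local data layout::

            >>> dataset = SUNRGBDDataset(
            ...     data_root='./data/sunrgbd',
            ...     ann_file='./data/sunrgbd/sunrgbd_infos_train.pkl')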
    """
    CLASSES = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
               'night_stand', 'bookshelf', 'bathtub')

    def __init__(self,
                 data_root,
                 ann_file,
                 pipeline=None,
                 classes=None,
                 modality=dict(use_camera=True, use_lidar=True),
                 box_type_3d='Depth',
                 filter_empty_gt=True,
                 test_mode=False):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)
        assert 'use_camera' in self.modality and \
            'use_lidar' in self.modality
        assert self.modality['use_camera'] or self.modality['use_lidar']

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data \
                preprocessing pipelines. It includes the following keys:

                - sample_idx (str): Sample index.
                - pts_filename (str, optional): Filename of point clouds.
                - file_name (str, optional): Filename of point clouds.
                - img_prefix (str | None, optional): Prefix of image files.
                - img_info (dict, optional): Image info.
                - calib (dict, optional): Camera calibration info.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        sample_idx = info['point_cloud']['lidar_idx']
        assert info['point_cloud']['lidar_idx'] == info['image']['image_idx']
        input_dict = dict(sample_idx=sample_idx)

        if self.modality['use_lidar']:
            pts_filename = osp.join(self.data_root, info['pts_path'])
            input_dict['pts_filename'] = pts_filename
            input_dict['file_name'] = pts_filename

        if self.modality['use_camera']:
            img_filename = osp.join(self.data_root, 'sunrgbd_trainval',
                                    info['image']['image_path'])
            input_dict['img_prefix'] = None
            input_dict['img_info'] = dict(filename=img_filename)
            calib = info['calib']
            input_dict['calib'] = calib

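        # In training, attach the annotations; with `filter_empty_gt`
        # enabled, samples without any GT box yield None so they can be
        # skipped.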
        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos
            if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:
                return None
        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information consisting of the following keys:

                - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \
                    3D ground truth bboxes.
                - gt_labels_3d (np.ndarray): Labels of ground truths.
                - bboxes (np.ndarray, optional): 2D ground truth bboxes,
                    only returned when ``use_camera`` is True.
                - labels (np.ndarray, optional): Labels of the 2D ground
                    truths, only returned when ``use_camera`` is True.
        """
        # Use index to get the annos, so the evalhook can also use this API
        info = self.data_infos[index]
        if info['annos']['gt_num'] != 0:
            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
                np.float32)  # (K, 7)
            gt_labels_3d = info['annos']['class'].astype(np.int64)
        else:
            gt_bboxes_3d = np.zeros((0, 7), dtype=np.float32)
            gt_labels_3d = np.zeros((0, ), dtype=np.int64)

        # Convert to the target box structure; the raw boxes are given with
        # a gravity-center origin (0.5, 0.5, 0.5).
        gt_bboxes_3d = DepthInstance3DBoxes(
            gt_bboxes_3d, origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)

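        # Optional 2D annotations for the camera modality; the 2D boxes
        # share the class labels of the 3D ground truths.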
        if self.modality['use_camera']:
            if info['annos']['gt_num'] != 0:
                gt_bboxes_2d = info['annos']['bbox'].astype(np.float32)
            else:
                gt_bboxes_2d = np.zeros((0, 4), dtype=np.float32)
            anns_results['bboxes'] = gt_bboxes_2d
            anns_results['labels'] = gt_labels_3d

        return anns_results

    def show(self, results, out_dir, show=True):
        """Results visualization.

        Args:
            results (list[dict]): List of bounding box results.
            out_dir (str): Output directory of visualization results.
            show (bool): Whether to visualize the results online.
                Defaults to True.
        """
        assert out_dir is not None, 'Expect out_dir, got None.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['pts_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.data_root, pts_path),
                dtype=np.float32).reshape(-1, 6)
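            # Columns 3:6 hold per-point colors, presumably stored in
            # [0, 1]; rescale to [0, 255] for visualization.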
            points[:, 3:] *= 255
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name,
                        show)

    def evaluate(self,
                 results,
                 metric=None,
                 iou_thr=(0.25, 0.5),
                 iou_thr_2d=(0.5, ),
                 logger=None,
                 show=False,
                 out_dir=None):
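        """Evaluate the results.

        Results given as a list of dicts are treated as 3D detections and
        dispatched to the parent class for evaluation; otherwise they are
        scored as 2D detections with ``eval_map``.

        Args:
            results (list): List of detection results.
            metric (str | list[str], optional): Metrics to be evaluated.
                Defaults to None.
            iou_thr (tuple[float], optional): AP IoU thresholds for 3D
                evaluation. Defaults to (0.25, 0.5).
            iou_thr_2d (tuple[float], optional): AP IoU thresholds for 2D
                evaluation. Defaults to (0.5, ).
            logger (logging.Logger | str, optional): Logger used for
                printing evaluation information. Defaults to None.
            show (bool, optional): Whether to visualize the results.
                Defaults to False.
            out_dir (str, optional): Path to save visualization results.
                Defaults to None.

        Returns:
            dict: Evaluation results.
        """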

        # evaluate 3D detection performance
        if isinstance(results[0], dict):
            return super().evaluate(results, metric, iou_thr, logger, show,
                                    out_dir)
        # evaluate 2D detection performance
        else:
            eval_results = OrderedDict()
            annotations = [self.get_ann_info(i) for i in range(len(self))]
            iou_thr_2d = (iou_thr_2d, ) if isinstance(iou_thr_2d,
                                                      float) else iou_thr_2d
            for iou_thr_2d_single in iou_thr_2d:
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=None,
                    iou_thr=iou_thr_2d_single,
                    dataset=self.CLASSES,
                    logger=logger)
                eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
            return eval_results