# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import warnings
from os import path as osp

import mmcv
import numpy as np
from torch.utils.data import Dataset

from mmdet.datasets import DATASETS
from ..core.bbox import get_box_type
from .pipelines import Compose
from .utils import extract_result_dict, get_loading_pipeline


@DATASETS.register_module()
class Custom3DDataset(Dataset):
    """Customized 3D dataset.

    This is the base dataset of the SUNRGB-D, ScanNet, nuScenes, and KITTI
    datasets.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the boxes
            in their original format and then convert them to `box_type_3d`.
            Defaults to 'LiDAR'. Available options include:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        file_client_args (dict, optional): Config dict of the file client
            used to load the annotation file.
            Defaults to dict(backend='disk').
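
    Example:
        An illustrative, config-style specification of a concrete subclass of
        this dataset; the paths, pipeline and class names below are
        placeholders, not real files::

            train_dataset = dict(
                type='ScanNetDataset',
                data_root='data/scannet/',
                ann_file='data/scannet/scannet_infos_train.pkl',
                pipeline=train_pipeline,
                classes=('cabinet', 'bed'),
                filter_empty_gt=True,
                box_type_3d='Depth')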
    """

    def __init__(self,
                 data_root,
                 ann_file,
                 pipeline=None,
                 classes=None,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 file_client_args=dict(backend='disk')):
        super().__init__()
        self.data_root = data_root
        self.ann_file = ann_file
        self.test_mode = test_mode
        self.modality = modality
        self.filter_empty_gt = filter_empty_gt
        self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d)

        self.CLASSES = self.get_classes(classes)
        self.file_client = mmcv.FileClient(**file_client_args)
        self.cat2id = {name: i for i, name in enumerate(self.CLASSES)}

        # load annotations
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(self.ann_file) as local_path:
                self.data_infos = self.load_annotations(open(local_path, 'rb'))
        else:
            warnings.warn(
                'The used MMCV version does not have get_local_path. '
                f'We treat {self.ann_file} as a local path and it '
                'might cause errors if the path is not a local path. '
                'Please use MMCV >= 1.3.16 if you meet errors.')
            self.data_infos = self.load_annotations(self.ann_file)

        # process pipeline
        if pipeline is not None:
            self.pipeline = Compose(pipeline)

        # set group flag for the samplers
        if not self.test_mode:
            self._set_group_flag()

    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file.

        Returns:
            list[dict]: List of annotations.
        """
        # loading data from a file-like object needs file format
        return mmcv.load(ann_file, file_format='pkl')

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data
                preprocessing pipelines. It includes the following keys:

                - sample_idx (str): Sample index.
                - pts_filename (str): Filename of point clouds.
                - file_name (str): Filename of point clouds.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        sample_idx = info['point_cloud']['lidar_idx']
        pts_filename = osp.join(self.data_root, info['pts_path'])

        input_dict = dict(
            pts_filename=pts_filename,
            sample_idx=sample_idx,
            file_name=pts_filename)

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos
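            # skip samples whose GT labels are all -1 (i.e. no valid boxes)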
            if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():
                return None
        return input_dict

    def pre_pipeline(self, results):
        """Initialization before data preparation.

        Args:
            results (dict): Dict before data preprocessing.

                - img_fields (list): Image fields.
                - bbox3d_fields (list): 3D bounding boxes fields.
                - pts_mask_fields (list): Mask fields of points.
                - pts_seg_fields (list): Mask fields of point segments.
                - bbox_fields (list): Fields of bounding boxes.
                - mask_fields (list): Fields of masks.
                - seg_fields (list): Segment fields.
                - box_type_3d (str): 3D box type.
                - box_mode_3d (str): 3D box mode.
        """
        results['img_fields'] = []
        results['bbox3d_fields'] = []
        results['pts_mask_fields'] = []
        results['pts_seg_fields'] = []
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []
        results['box_type_3d'] = self.box_type_3d
        results['box_mode_3d'] = self.box_mode_3d

    def prepare_train_data(self, index):
        """Training data preparation.

        Args:
            index (int): Index for accessing the target data.

        Returns:
            dict: Training data dict of the corresponding index.
        """
        input_dict = self.get_data_info(index)
        if input_dict is None:
            return None
        self.pre_pipeline(input_dict)
        example = self.pipeline(input_dict)
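        # Drop the sample if all GT labels are -1 (no valid boxes) when
        # filter_empty_gt is set; __getitem__ will then retry another index.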
        if self.filter_empty_gt and \
                (example is None or
                    ~(example['gt_labels_3d']._data != -1).any()):
            return None
        return example

    def prepare_test_data(self, index):
        """Prepare data for testing.

        Args:
            index (int): Index for accessing the target data.

        Returns:
            dict: Testing data dict of the corresponding index.
        """
        input_dict = self.get_data_info(index)
        self.pre_pipeline(input_dict)
        example = self.pipeline(input_dict)
        return example

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str): If classes is None, use the
                default CLASSES defined by the builtin dataset. If classes is
                a string, take it as a file name; the file contains one class
                name per line. If classes is a tuple or list, override the
                CLASSES defined by the dataset.

        Returns:
            list[str]: A list of class names.
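
        Example:
            Illustrative usage (the class names are placeholders)::

                >>> Custom3DDataset.get_classes(('Car', 'Pedestrian'))
                ('Car', 'Pedestrian')
                >>> # a text file with one class name per line also works:
                >>> # Custom3DDataset.get_classes('path/to/classes.txt')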
        """
        if classes is None:
            return cls.CLASSES

        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        return class_names

    def format_results(self,
                       outputs,
                       pklfile_prefix=None,
                       submission_prefix=None):
        """Format the results to pkl file.

        Args:
            outputs (list[dict]): Testing results of the dataset.
            pklfile_prefix (str): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str, optional): The prefix of files submitted
                to a benchmark server. Not used by this base implementation;
                subclasses may override ``format_results`` to use it.
                Default: None.

        Returns:
            tuple: (outputs, tmp_dir), outputs is the detection results,
                tmp_dir is the temporary directory created for saving pkl
                files when ``pklfile_prefix`` is not specified; otherwise
                ``tmp_dir`` is None.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        out = f'{pklfile_prefix}.pkl'
        mmcv.dump(outputs, out)
        return outputs, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 iou_thr=(0.25, 0.5),
                 logger=None,
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Evaluate.

        Evaluation in indoor protocol.

        Args:
            results (list[dict]): List of results.
            metric (str | list[str], optional): Metrics to be evaluated.
                Defaults to None.
            iou_thr (list[float]): AP IoU thresholds. Defaults to (0.25, 0.5).
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Defaults to None.
            show (bool, optional): Whether to visualize.
                Default: False.
            out_dir (str, optional): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): Raw data loading pipeline used
                for visualization. Default: None.

        Returns:
            dict: Evaluation results.
        """
        from mmdet3d.core.evaluation import indoor_eval
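        # Each element of ``results`` is assumed to be a single-sample
        # detection dict in the standard mmdet3d format (typically with keys
        # like 'boxes_3d', 'scores_3d' and 'labels_3d'); the exact keys are
        # consumed by ``indoor_eval`` rather than checked here.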
        assert isinstance(
            results, list), f'Expect results to be list, got {type(results)}.'
        assert len(results) > 0, 'Expect length of results > 0.'
        assert len(results) == len(self.data_infos)
        assert isinstance(
            results[0], dict
        ), f'Expect elements in results to be dict, got {type(results[0])}.'
        gt_annos = [info['annos'] for info in self.data_infos]
        label2cat = {i: cat_id for i, cat_id in enumerate(self.CLASSES)}
        ret_dict = indoor_eval(
            gt_annos,
            results,
            iou_thr,
            label2cat,
            logger=logger,
            box_type_3d=self.box_type_3d,
            box_mode_3d=self.box_mode_3d)
        if show:
            self.show(results, out_dir, pipeline=pipeline)

        return ret_dict

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        raise NotImplementedError('_build_default_pipeline is not implemented '
                                  f'for dataset {self.__class__.__name__}')

    def _get_pipeline(self, pipeline):
        """Get data loading pipeline in self.show/evaluate function.

        Args:
            pipeline (list[dict]): Input pipeline. If None is given, it will
                be obtained from ``self.pipeline``.
        """
        if pipeline is None:
            if not hasattr(self, 'pipeline') or self.pipeline is None:
                warnings.warn(
                    'Use default pipeline for data loading, this may cause '
                    'errors when data is on ceph')
                return self._build_default_pipeline()
            loading_pipeline = get_loading_pipeline(self.pipeline.transforms)
            return Compose(loading_pipeline)
        return Compose(pipeline)

    def _extract_data(self, index, pipeline, key, load_annos=False):
        """Load data using input pipeline and extract data according to key.

        Args:
            index (int): Index for accessing the target data.
            pipeline (:obj:`Compose`): Composed data loading pipeline.
            key (str | list[str]): One single or a list of data key.
            load_annos (bool): Whether to load data annotations.
                If True, ``self.test_mode`` is temporarily set to False so
                that annotations are loaded.

        Returns:
            np.ndarray | torch.Tensor | list[np.ndarray | torch.Tensor]:
                A single or a list of loaded data.
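
        Example:
            An illustrative sketch of how ``show``/``evaluate`` in concrete
            datasets typically combine ``_get_pipeline`` and ``_extract_data``;
            the 'points' key is an assumption that depends on the loading
            pipeline::

                pipeline = self._get_pipeline(pipeline)
                points = self._extract_data(index, pipeline, 'points').numpy()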
        """
        assert pipeline is not None, 'data loading pipeline is not provided'
        # when we want to load ground-truth via pipeline (e.g. bbox, seg mask)
        # we need to set self.test_mode as False so that we have 'annos'
        if load_annos:
            original_test_mode = self.test_mode
            self.test_mode = False
        input_dict = self.get_data_info(index)
        self.pre_pipeline(input_dict)
        example = pipeline(input_dict)

        # extract data items according to keys
        if isinstance(key, str):
            data = extract_result_dict(example, key)
        else:
            data = [extract_result_dict(example, k) for k in key]
        if load_annos:
            self.test_mode = original_test_mode

        return data

    def __len__(self):
        """Return the length of data infos.

        Returns:
            int: Length of data infos.
        """
        return len(self.data_infos)

    def _rand_another(self, idx):
        """Randomly get another item with the same flag.

        Returns:
            int: Another index of item with the same flag.
        """
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get item from infos according to the given index.

        Returns:
            dict: Data dictionary of the corresponding index.
        """
        if self.test_mode:
            return self.prepare_test_data(idx)
        while True:
            data = self.prepare_train_data(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0. In 3D datasets all samples share the same flag,
        so the flag array is all zeros.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
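

# ``Custom3DDataset`` is usually used through a concrete subclass (e.g.
# ``KittiDataset`` or ``ScanNetDataset``) that provides ``get_ann_info``
# (called by ``get_data_info``) and, for visualization, ``show`` and
# ``_build_default_pipeline``. A minimal, illustrative sketch; the field
# names inside ``info['annos']`` are assumptions that vary per dataset:
#
#     @DATASETS.register_module()
#     class MyLidarDataset(Custom3DDataset):
#         CLASSES = ('Car', 'Pedestrian')
#
#         def get_ann_info(self, index):
#             annos = self.data_infos[index]['annos']
#             gt_bboxes_3d = self.box_type_3d(annos['gt_boxes_3d'])
#             gt_labels_3d = annos['gt_labels_3d'].astype(np.int64)
#             return dict(
#                 gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)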