import copy
import mmcv
import numpy as np
import os
import tempfile
import torch
from mmcv.utils import print_log
from os import path as osp

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, CameraInstance3DBoxes, points_cam2img
from .custom_3d import Custom3DDataset


@DATASETS.register_module()
class KittiDataset(Custom3DDataset):
    r"""KITTI Dataset.

    This class serves as the API for experiments on the `KITTI Dataset
    <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d>`_.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        split (str): Split of input data.
        pts_prefix (str, optional): Prefix of points files.
            Defaults to 'velodyne'.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format and then convert them to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
    """
    CLASSES = ('car', 'pedestrian', 'cyclist')

    def __init__(self,
                 data_root,
                 ann_file,
                 split,
                 pts_prefix='velodyne',
                 pipeline=None,
                 classes=None,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)

        self.root_split = os.path.join(self.data_root, split)
        assert self.modality is not None
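        # Point cloud range [x_min, y_min, z_min, x_max, y_max, z_max] used to
        # drop predictions outside the KITTI front-view area when converting
        # boxes for evaluation (see `convert_valid_bboxes`).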
        self.pcd_limit_range = [0, -40, -3, 70.4, 40, 0.0]
        self.pts_prefix = pts_prefix

    def _get_pts_filename(self, idx):
        """Get point cloud filename according to the given index.

        Args:
            idx (int): Index of the point cloud file to get.

        Returns:
            str: Name of the point cloud file.
        """
        pts_filename = osp.join(self.root_split, self.pts_prefix,
                                f'{idx:06d}.bin')
        return pts_filename

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data \
                preprocessing pipelines. It includes the following keys:

                - sample_idx (str): Sample index.
                - pts_filename (str): Filename of point clouds.
                - img_prefix (str | None): Prefix of image files.
                - img_info (dict): Image info.
                - lidar2img (list[np.ndarray], optional): Transformations \
                    from lidar to different cameras.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        sample_idx = info['image']['image_idx']
        img_filename = os.path.join(self.data_root,
                                    info['image']['image_path'])

        # TODO: consider use torch.Tensor only
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
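        # Compose the LiDAR-to-image projection: velodyne-to-camera
        # extrinsics (Tr_velo_to_cam), rectification (R0_rect), then the
        # camera-2 projection matrix (P2).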
        lidar2img = P2 @ rect @ Trv2c

        pts_filename = self._get_pts_filename(sample_idx)
        input_dict = dict(
            sample_idx=sample_idx,
            pts_filename=pts_filename,
            img_prefix=None,
            img_info=dict(filename=img_filename),
            lidar2img=lidar2img)

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information consists of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \
                    3D ground truth bboxes.
                - gt_labels_3d (np.ndarray): Labels of ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_labels (np.ndarray): Labels of ground truths.
                - gt_names (list[str]): Class names of ground truths.
        """
        # Use index to get the annos, so the evalhook can also use this API
        info = self.data_infos[index]
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)

        annos = info['annos']
        # we need other objects to avoid collision when sampling
        annos = self.remove_dontcare(annos)
        loc = annos['location']
        dims = annos['dimensions']
        rots = annos['rotation_y']
        gt_names = annos['name']
        # print(gt_names, len(loc))
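        # Stack location, dimensions and yaw (rotation_y) into (N, 7)
        # camera-frame boxes before converting them to the target box type.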
        gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1).astype(np.float32)

        # convert gt_bboxes_3d to velodyne coordinates
        gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
            self.box_mode_3d, np.linalg.inv(rect @ Trv2c))
        gt_bboxes = annos['bbox']

        selected = self.drop_arrays_by_name(gt_names, ['DontCare'])
        # gt_bboxes_3d = gt_bboxes_3d[selected].astype('float32')
        gt_bboxes = gt_bboxes[selected].astype('float32')
        gt_names = gt_names[selected]

        gt_labels = []
        for cat in gt_names:
            if cat in self.CLASSES:
                gt_labels.append(self.CLASSES.index(cat))
            else:
                gt_labels.append(-1)
        gt_labels = np.array(gt_labels)
        gt_labels_3d = copy.deepcopy(gt_labels)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            bboxes=gt_bboxes,
            labels=gt_labels,
            gt_names=gt_names)
        return anns_results

    def drop_arrays_by_name(self, gt_names, used_classes):
        """Drop irrelevant ground truths by name.

        Args:
            gt_names (list[str]): Names of ground truths.
            used_classes (list[str]): Classes of interest.

        Returns:
            np.ndarray: Indices of ground truths that will be dropped.
        """
        inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def keep_arrays_by_name(self, gt_names, used_classes):
        """Keep useful ground truths by name.

        Args:
            gt_names (list[str]): Names of ground truths.
            used_classes (list[str]): Classes of interest.

        Returns:
            np.ndarray: Indices of ground truths that will be kept.
        """
        inds = [i for i, x in enumerate(gt_names) if x in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def remove_dontcare(self, ann_info):
        """Remove annotations that do not need to be cared about.

        Args:
            ann_info (dict): Dict of annotation infos. The ``'DontCare'``
                annotations will be removed according to ann_info['name'].

        Returns:
            dict: Annotations after filtering.
        """
        img_filtered_annotations = {}
        relevant_annotation_indices = [
            i for i, x in enumerate(ann_info['name']) if x != 'DontCare'
        ]
        for key in ann_info.keys():
            img_filtered_annotations[key] = (
                ann_info[key][relevant_annotation_indices])
        return img_filtered_annotations

    def format_results(self,
                       outputs,
                       pklfile_prefix=None,
                       submission_prefix=None):
        """Format the results to pkl file.

        Args:
            outputs (list[dict]): Testing results of the dataset.
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submitted files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the pkl filepaths, tmp_dir is the temporary directory created \
                for saving pkl files when pklfile_prefix is not specified.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

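        # Results may be plain 2D box lists (2D-only models), dicts keyed by
        # 'pts_bbox'/'img_bbox' (multi-modality models), or 3D box dicts.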
        if not isinstance(outputs[0], dict):
            result_files = self.bbox2result_kitti2d(outputs, self.CLASSES,
                                                    pklfile_prefix,
                                                    submission_prefix)
        elif 'pts_bbox' in outputs[0] or 'img_bbox' in outputs[0]:
            result_files = dict()
            for name in outputs[0]:
                results_ = [out[name] for out in outputs]
                pklfile_prefix_ = pklfile_prefix + name
                if submission_prefix is not None:
                    submission_prefix_ = submission_prefix + name
                else:
                    submission_prefix_ = None
                if 'img' in name:
                    result_files_ = self.bbox2result_kitti2d(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                else:
                    result_files_ = self.bbox2result_kitti(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                result_files[name] = result_files_
        else:
            result_files = self.bbox2result_kitti(outputs, self.CLASSES,
                                                  pklfile_prefix,
                                                  submission_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 logger=None,
                 pklfile_prefix=None,
                 submission_prefix=None,
                 show=False,
                 out_dir=None):
        """Evaluation in KITTI protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submission data.
                If not specified, the submission data will not be generated.
            show (bool): Whether to visualize.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir = self.format_results(results, pklfile_prefix)
        from mmdet3d.core.evaluation import kitti_eval
        gt_annos = [info['annos'] for info in self.data_infos]

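        # kitti_eval reports AP over the easy/moderate/hard difficulty splits;
        # LiDAR results are evaluated with 2D bbox, BEV and 3D metrics, while
        # image-only results use the 2D bbox metric.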
        if isinstance(result_files, dict):
            ap_dict = dict()
            for name, result_files_ in result_files.items():
                eval_types = ['bbox', 'bev', '3d']
                if 'img' in name:
                    eval_types = ['bbox']
                ap_result_str, ap_dict_ = kitti_eval(
                    gt_annos,
                    result_files_,
                    self.CLASSES,
                    eval_types=eval_types)
                for ap_type, ap in ap_dict_.items():
                    ap_dict[f'{name}/{ap_type}'] = float('{:.4f}'.format(ap))

                print_log(
                    f'Results of {name}:\n' + ap_result_str, logger=logger)

        else:
            if metric == 'img_bbox':
                ap_result_str, ap_dict = kitti_eval(
                    gt_annos, result_files, self.CLASSES, eval_types=['bbox'])
            else:
                ap_result_str, ap_dict = kitti_eval(gt_annos, result_files,
                                                    self.CLASSES)
            print_log('\n' + ap_result_str, logger=logger)

        if tmp_dir is not None:
            tmp_dir.cleanup()
        if show:
            self.show(results, out_dir)
        return ap_dict

    def bbox2result_kitti(self,
                          net_outputs,
                          class_names,
                          pklfile_prefix=None,
                          submission_prefix=None):
        """Convert 3D detection results to KITTI format for evaluation and test
        submission.

        Args:
            net_outputs (list[np.ndarray]): List of arrays storing the \
                inferred bounding boxes and scores.
            class_names (list[str]): A list of class names.
            pklfile_prefix (str | None): The prefix of pkl file.
            submission_prefix (str | None): The prefix of submission file.

        Returns:
            list[dict]: A list of dictionaries with the KITTI format.
        """
        assert len(net_outputs) == len(self.data_infos)
        if submission_prefix is not None:
            mmcv.mkdir_or_exist(submission_prefix)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for idx, pred_dicts in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            info = self.data_infos[idx]
            sample_idx = info['image']['image_idx']
            image_shape = info['image']['image_shape'][:2]

            box_dict = self.convert_valid_bboxes(pred_dicts, info)
            if len(box_dict['bbox']) > 0:
                box_2d_preds = box_dict['bbox']
                box_preds = box_dict['box3d_camera']
                scores = box_dict['scores']
                box_preds_lidar = box_dict['box3d_lidar']
                label_preds = box_dict['label_preds']

                anno = {
                    'name': [],
                    'truncated': [],
                    'occluded': [],
                    'alpha': [],
                    'bbox': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'score': []
                }

                for box, box_lidar, bbox, score, label in zip(
                        box_preds, box_preds_lidar, box_2d_preds, scores,
                        label_preds):
                    bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
                    bbox[:2] = np.maximum(bbox[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
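                    # KITTI observation angle (alpha): the global yaw minus
                    # the viewing azimuth of the box center.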
                    anno['alpha'].append(
                        -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])
                    anno['bbox'].append(bbox)
                    anno['dimensions'].append(box[3:6])
                    anno['location'].append(box[:3])
                    anno['rotation_y'].append(box[6])
                    anno['score'].append(score)

                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

                if submission_prefix is not None:
                    curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                    with open(curr_file, 'w') as f:
                        bbox = anno['bbox']
                        loc = anno['location']
                        dims = anno['dimensions']  # lhw -> hwl

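                        # One line per box in the KITTI label format: type,
                        # truncated, occluded, alpha, 2D bbox (4 values),
                        # dimensions h/w/l, location, rotation_y and score.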
                        for idx in range(len(bbox)):
                            print(
                                '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.
                                format(anno['name'][idx], anno['alpha'][idx],
                                       bbox[idx][0], bbox[idx][1],
                                       bbox[idx][2], bbox[idx][3],
                                       dims[idx][1], dims[idx][2],
                                       dims[idx][0], loc[idx][0], loc[idx][1],
                                       loc[idx][2], anno['rotation_y'][idx],
                                       anno['score'][idx]),
                                file=f)
            else:
                annos.append({
                    'name': np.array([]),
                    'truncated': np.array([]),
                    'occluded': np.array([]),
                    'alpha': np.array([]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.array([]),
                    'score': np.array([]),
                })
            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * len(annos[-1]['score']), dtype=np.int64)

            det_annos += annos

        if pklfile_prefix is not None:
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmcv.dump(det_annos, out)
            print('Result is saved to %s' % out)

        return det_annos

    def bbox2result_kitti2d(self,
                            net_outputs,
                            class_names,
                            pklfile_prefix=None,
                            submission_prefix=None):
        """Convert 2D detection results to KITTI format for evaluation and test
        submission.

        Args:
            net_outputs (list[np.ndarray]): List of arrays storing the \
                inferred bounding boxes and scores.
            class_names (list[str]): A list of class names.
            pklfile_prefix (str | None): The prefix of pkl file.
            submission_prefix (str | None): The prefix of submission file.

        Returns:
            list[dict]: A list of dictionaries with the KITTI format.
        """
        assert len(net_outputs) == len(self.data_infos)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for i, bboxes_per_sample in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            anno = dict(
                name=[],
                truncated=[],
                occluded=[],
                alpha=[],
                bbox=[],
                dimensions=[],
                location=[],
                rotation_y=[],
                score=[])
            sample_idx = self.data_infos[i]['image']['image_idx']

            num_example = 0
            for label in range(len(bboxes_per_sample)):
                bbox = bboxes_per_sample[label]
                for i in range(bbox.shape[0]):
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
                    anno['alpha'].append(0.0)
                    anno['bbox'].append(bbox[i, :4])
                    # set dimensions (height, width, length) to zero
                    anno['dimensions'].append(
                        np.zeros(shape=[3], dtype=np.float32))
                    # set the 3D translation to (-1000, -1000, -1000)
                    anno['location'].append(
                        np.ones(shape=[3], dtype=np.float32) * (-1000.0))
                    anno['rotation_y'].append(0.0)
                    anno['score'].append(bbox[i, 4])
                    num_example += 1

            if num_example == 0:
                annos.append(
                    dict(
                        name=np.array([]),
                        truncated=np.array([]),
                        occluded=np.array([]),
                        alpha=np.array([]),
                        bbox=np.zeros([0, 4]),
                        dimensions=np.zeros([0, 3]),
                        location=np.zeros([0, 3]),
                        rotation_y=np.array([]),
                        score=np.array([]),
                    ))
            else:
                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * num_example, dtype=np.int64)
            det_annos += annos

        if pklfile_prefix is not None:
            # save file in pkl format
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                pklfile_path = f'{pklfile_prefix}.pkl'
            else:
                pklfile_path = pklfile_prefix
            mmcv.dump(det_annos, pklfile_path)

        if submission_prefix is not None:
            # save file in submission format
            mmcv.mkdir_or_exist(submission_prefix)
            print(f'Saving KITTI submission to {submission_prefix}')
            for i, anno in enumerate(det_annos):
                sample_idx = self.data_infos[i]['image']['image_idx']
                cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                with open(cur_det_file, 'w') as f:
                    bbox = anno['bbox']
                    loc = anno['location']
                    dims = anno['dimensions'][:, ::-1]  # lhw -> hwl
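                    # Same KITTI label line format as above; the 3D fields
                    # (dimensions, location, rotation_y) are placeholders
                    # since only 2D boxes are available here.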
                    for idx in range(len(bbox)):
                        print(
                            '{} -1 -1 {:4f} {:4f} {:4f} {:4f} {:4f} {:4f} '
                            '{:4f} {:4f} {:4f} {:4f} {:4f} {:4f} {:4f}'.format(
                                anno['name'][idx],
                                anno['alpha'][idx],
                                *bbox[idx],  # 4 float
                                *dims[idx],  # 3 float
                                *loc[idx],  # 3 float
                                anno['rotation_y'][idx],
                                anno['score'][idx]),
                            file=f,
                        )
            print('Result is saved to {}'.format(submission_prefix))

        return det_annos

    def convert_valid_bboxes(self, box_dict, info):
        """Convert the predicted boxes into valid ones.

        Args:
            box_dict (dict): Box dictionaries to be converted.

                - boxes_3d (:obj:`LiDARInstance3DBoxes`): 3D bounding boxes.
                - scores_3d (torch.Tensor): Scores of boxes.
                - labels_3d (torch.Tensor): Class labels of boxes.
            info (dict): Data info.

        Returns:
            dict: Valid predicted boxes.

                - bbox (np.ndarray): 2D bounding boxes.
                - box3d_camera (np.ndarray): 3D bounding boxes in \
                    camera coordinate.
                - box3d_lidar (np.ndarray): 3D bounding boxes in \
                    LiDAR coordinate.
                - scores (np.ndarray): Scores of boxes.
                - label_preds (np.ndarray): Class label predictions.
                - sample_idx (int): Sample index.
        """
        # TODO: refactor this function
        box_preds = box_dict['boxes_3d']
        scores = box_dict['scores_3d']
        labels = box_dict['labels_3d']
        sample_idx = info['image']['image_idx']
        # TODO: remove the hack of yaw
        box_preds.tensor[:, -1] = box_preds.tensor[:, -1] - np.pi
        box_preds.limit_yaw(offset=0.5, period=np.pi * 2)

        if len(box_preds) == 0:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0, 4]),
                sample_idx=sample_idx)

        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
        img_shape = info['image']['image_shape']
        P2 = box_preds.tensor.new_tensor(P2)

        box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c)

        box_corners = box_preds_camera.corners
        box_corners_in_image = points_cam2img(box_corners, P2)
        # box_corners_in_image: [N, 8, 2]
        minxy = torch.min(box_corners_in_image, dim=1)[0]
        maxxy = torch.max(box_corners_in_image, dim=1)[0]
        box_2d_preds = torch.cat([minxy, maxxy], dim=1)
        # Post-processing
        # Keep boxes whose projected 2D box overlaps the image
        image_shape = box_preds.tensor.new_tensor(img_shape)
        valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &
                          (box_2d_preds[:, 1] < image_shape[0]) &
                          (box_2d_preds[:, 2] > 0) &
                          (box_2d_preds[:, 3] > 0))
        # check box_preds
        limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)
        valid_pcd_inds = ((box_preds.center > limit_range[:3]) &
                          (box_preds.center < limit_range[3:]))
        valid_inds = valid_cam_inds & valid_pcd_inds.all(-1)

        if valid_inds.sum() > 0:
            return dict(
                bbox=box_2d_preds[valid_inds, :].numpy(),
                box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),
                box3d_lidar=box_preds[valid_inds].tensor.numpy(),
                scores=scores[valid_inds].numpy(),
                label_preds=labels[valid_inds].numpy(),
                sample_idx=sample_idx,
            )
        else:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0, 4]),
                sample_idx=sample_idx,
            )

    def show(self, results, out_dir):
        """Results visualization.

        Args:
            results (list[dict]): List of bounding box results.
            out_dir (str): Output directory of visualization result.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            example = self.prepare_test_data(i)
            data_info = self.data_infos[i]
            pts_path = data_info['point_cloud']['velodyne_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            # for now we convert points into depth mode
            points = example['points'][0]._data.numpy()
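            # LiDAR frame (x forward, y left) -> depth frame (x right,
            # y forward): swap the x/y axes and flip the new x axis.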
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)