import copy
import mmcv
import numpy as np
import os
import tempfile
import torch
from mmcv.utils import print_log
from os import path as osp

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, CameraInstance3DBoxes, points_cam2img
from .custom_3d import Custom3DDataset


@DATASETS.register_module()
class KittiDataset(Custom3DDataset):
    """KITTI Dataset.

    This class serves as the API for experiments on the KITTI Dataset.

    Please refer to `<http://www.cvlibs.net/datasets/kitti/eval_object.php?
    obj_benchmark=3d>`_ for data downloading. It is recommended to symlink
    the dataset root to $MMDETECTION3D/data and organize it as the
    documentation shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        split (str): Split of input data.
        pts_prefix (str, optional): Prefix of points files.
            Defaults to 'velodyne'.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the
            boxes in their original format and then convert them to
            `box_type_3d`. Defaults to 'LiDAR' in this dataset. Available
            options include

            - 'LiDAR': box in LiDAR coordinates
            - 'Depth': box in depth coordinates, usually for indoor datasets
            - 'Camera': box in camera coordinates
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
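
    Example:
        A minimal construction sketch; the paths and modality values below
        are placeholders that assume the standard KITTI layout described
        above.

        >>> dataset = KittiDataset(
        ...     data_root='data/kitti',
        ...     ann_file='data/kitti/kitti_infos_train.pkl',
        ...     split='training',
        ...     pts_prefix='velodyne_reduced',
        ...     modality=dict(use_lidar=True, use_camera=False))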
    """
    CLASSES = ('car', 'pedestrian', 'cyclist')

    def __init__(self,
                 data_root,
                 ann_file,
                 split,
                 pts_prefix='velodyne',
                 pipeline=None,
                 classes=None,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)

        self.root_split = os.path.join(self.data_root, split)
        assert self.modality is not None
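        # [x_min, y_min, z_min, x_max, y_max, z_max] of the annotated KITTI
        # range in the LiDAR frame; predictions outside this range are
        # dropped in `convert_valid_bboxes`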
        self.pcd_limit_range = [0, -40, -3, 70.4, 40, 0.0]
        self.pts_prefix = pts_prefix

    def _get_pts_filename(self, idx):
        """Get point cloud filename according to the given sample index."""
        pts_filename = osp.join(self.root_split, self.pts_prefix,
                                f'{idx:06d}.bin')
        return pts_filename

    def get_data_info(self, index):
        """Get data info according to the given index.

        Returns a dict with the sample index, point cloud filename, image
        info and the LiDAR-to-image projection matrix; ground truth
        annotations are attached when not in test mode.
        """
        info = self.data_infos[index]
        sample_idx = info['image']['image_idx']
        img_filename = os.path.join(self.data_root,
                                    info['image']['image_path'])

        # TODO: consider use torch.Tensor only
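        # The calibration matrices in the info files are stored as 4x4
        # homogeneous matrices, so the full LiDAR-to-image projection is a
        # single composition: intrinsics (P2) @ rectification (rect) @
        # LiDAR-to-camera extrinsics (Trv2c).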
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
        lidar2img = P2 @ rect @ Trv2c

        pts_filename = self._get_pts_filename(sample_idx)
        input_dict = dict(
            sample_idx=sample_idx,
            pts_filename=pts_filename,
            img_prefix=None,
            img_info=dict(filename=img_filename),
            lidar2img=lidar2img)

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Accessing annotations by index also allows the evaluation hook to
        reuse this API.
        """
        info = self.data_infos[index]
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)

        annos = info['annos']
        # objects of other classes are kept so that GT sampling during
        # augmentation can avoid collisions with them; only 'DontCare'
        # annotations are removed here
        annos = self.remove_dontcare(annos)
        loc = annos['location']
        dims = annos['dimensions']
        rots = annos['rotation_y']
        gt_names = annos['name']
        gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1).astype(np.float32)

        # convert gt_bboxes_3d to velodyne coordinates
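        # np.linalg.inv(rect @ Trv2c) maps rectified-camera coordinates
        # back to the LiDAR (velodyne) frame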
        gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
            self.box_mode_3d, np.linalg.inv(rect @ Trv2c))
        gt_bboxes = annos['bbox']

        selected = self.drop_arrays_by_name(gt_names, ['DontCare'])
        gt_bboxes = gt_bboxes[selected].astype('float32')
        gt_names = gt_names[selected]

        gt_labels = []
        for cat in gt_names:
            if cat in self.CLASSES:
                gt_labels.append(self.CLASSES.index(cat))
            else:
                gt_labels.append(-1)
        gt_labels = np.array(gt_labels)
        gt_labels_3d = copy.deepcopy(gt_labels)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            bboxes=gt_bboxes,
            labels=gt_labels,
            gt_names=gt_names)
        return anns_results

    def drop_arrays_by_name(self, gt_names, used_classes):
        """Return indices of names that are NOT in ``used_classes``."""
        inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def keep_arrays_by_name(self, gt_names, used_classes):
        """Return indices of names that ARE in ``used_classes``."""
        inds = [i for i, x in enumerate(gt_names) if x in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def remove_dontcare(self, ann_info):
        """Remove annotations labeled 'DontCare' from an annotation dict."""
        img_filtered_annotations = {}
        relevant_annotation_indices = [
            i for i, x in enumerate(ann_info['name']) if x != 'DontCare'
        ]
        for key in ann_info.keys():
            img_filtered_annotations[key] = (
                ann_info[key][relevant_annotation_indices])
        return img_filtered_annotations

    def format_results(self,
                       outputs,
                       pklfile_prefix=None,
                       submission_prefix=None):
        """Format the results to pkl files and optional submission files.

        Returns the formatted result files together with the temporary
        directory created when ``pklfile_prefix`` is not given; the caller
        is responsible for cleaning up the temporary directory.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

        if not isinstance(outputs[0], dict):
            result_files = self.bbox2result_kitti2d(outputs, self.CLASSES,
                                                    pklfile_prefix,
                                                    submission_prefix)
        elif 'pts_bbox' in outputs[0] or 'img_bbox' in outputs[0]:
            result_files = dict()
            for name in outputs[0]:
                results_ = [out[name] for out in outputs]
                pklfile_prefix_ = pklfile_prefix + name
                if submission_prefix is not None:
                    submission_prefix_ = submission_prefix + name
                else:
                    submission_prefix_ = None
                if 'img' in name:
                    result_files_ = self.bbox2result_kitti2d(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                else:
                    result_files_ = self.bbox2result_kitti(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                result_files[name] = result_files_
        else:
            result_files = self.bbox2result_kitti(outputs, self.CLASSES,
                                                  pklfile_prefix,
                                                  submission_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 logger=None,
                 pklfile_prefix=None,
                 submission_prefix=None,
                 show=False,
                 out_dir=None):
        """Evaluation in KITTI protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submission data.
                If not specified, the submission data will not be generated.
            show (bool): Whether to visualize.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir = self.format_results(results, pklfile_prefix)
        from mmdet3d.core.evaluation import kitti_eval
        gt_annos = [info['annos'] for info in self.data_infos]

        if isinstance(result_files, dict):
            ap_dict = dict()
            for name, result_files_ in result_files.items():
                eval_types = ['bbox', 'bev', '3d']
                if 'img' in name:
                    eval_types = ['bbox']
                ap_result_str, ap_dict_ = kitti_eval(
                    gt_annos,
                    result_files_,
                    self.CLASSES,
                    eval_types=eval_types)
                for ap_type, ap in ap_dict_.items():
                    ap_dict[f'{name}/{ap_type}'] = float('{:.4f}'.format(ap))

                print_log(
                    f'Results of {name}:\n' + ap_result_str, logger=logger)

        else:
            if metric == 'img_bbox':
                ap_result_str, ap_dict = kitti_eval(
                    gt_annos, result_files, self.CLASSES, eval_types=['bbox'])
            else:
                ap_result_str, ap_dict = kitti_eval(gt_annos, result_files,
                                                    self.CLASSES)
            print_log('\n' + ap_result_str, logger=logger)

        if tmp_dir is not None:
            tmp_dir.cleanup()
        if show:
            self.show(results, out_dir)
        return ap_dict

    def bbox2result_kitti(self,
                          net_outputs,
                          class_names,
                          pklfile_prefix=None,
                          submission_prefix=None):
        """Convert 3D detection results to KITTI format.

        The converted annotations can be used for evaluation and for
        generating test submission files.
        """
        assert len(net_outputs) == len(self.data_infos)
        if submission_prefix is not None:
            mmcv.mkdir_or_exist(submission_prefix)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for idx, pred_dicts in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            info = self.data_infos[idx]
            sample_idx = info['image']['image_idx']
            image_shape = info['image']['image_shape'][:2]
            box_dict = self.convert_valid_bboxes(pred_dicts, info)
            if len(box_dict['bbox']) > 0:
                box_2d_preds = box_dict['bbox']
                box_preds = box_dict['box3d_camera']
                scores = box_dict['scores']
                box_preds_lidar = box_dict['box3d_lidar']
                label_preds = box_dict['label_preds']

                anno = {
                    'name': [],
                    'truncated': [],
                    'occluded': [],
                    'alpha': [],
                    'bbox': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'score': []
                }

                for box, box_lidar, bbox, score, label in zip(
                        box_preds, box_preds_lidar, box_2d_preds, scores,
                        label_preds):
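                    # clip the projected 2D box to the image boundary:
                    # image_shape is (H, W), so it is reversed to (W, H)
                    # to bound the (x_max, y_max) corner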
                    bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
                    bbox[:2] = np.maximum(bbox[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
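                    # alpha is the KITTI observation angle,
                    # rotation_y - arctan2(x_cam, z_cam); the azimuth term
                    # is computed from the LiDAR-frame center, where
                    # x_cam ~ -y_lidar and z_cam ~ x_lidar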
                    anno['alpha'].append(
                        -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])
                    anno['bbox'].append(bbox)
                    anno['dimensions'].append(box[3:6])
                    anno['location'].append(box[:3])
                    anno['rotation_y'].append(box[6])
                    anno['score'].append(score)

                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

                if submission_prefix is not None:
                    curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                    with open(curr_file, 'w') as f:
                        bbox = anno['bbox']
                        loc = anno['location']
                        # dims are (l, h, w); written out as h, w, l below
                        dims = anno['dimensions']

                        # `box_idx` avoids shadowing the outer sample
                        # index `idx`
                        for box_idx in range(len(bbox)):
                            print(
                                '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.
                                format(anno['name'][box_idx],
                                       anno['alpha'][box_idx],
                                       bbox[box_idx][0], bbox[box_idx][1],
                                       bbox[box_idx][2], bbox[box_idx][3],
                                       dims[box_idx][1], dims[box_idx][2],
                                       dims[box_idx][0], loc[box_idx][0],
                                       loc[box_idx][1], loc[box_idx][2],
                                       anno['rotation_y'][box_idx],
                                       anno['score'][box_idx]),
                                file=f)
            else:
                annos.append({
                    'name': np.array([]),
                    'truncated': np.array([]),
                    'occluded': np.array([]),
                    'alpha': np.array([]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.array([]),
                    'score': np.array([]),
                })
            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * len(annos[-1]['score']), dtype=np.int64)

            det_annos += annos

        if pklfile_prefix is not None:
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmcv.dump(det_annos, out)
            print('Result is saved to %s' % out)

        return det_annos

    def bbox2result_kitti2d(self,
                            net_outputs,
                            class_names,
                            pklfile_prefix=None,
                            submission_prefix=None):
        """Convert results to kitti format for evaluation and test submission.

        Args:
            net_outputs (list[np.ndarray]): List of arrays storing the
                bboxes and scores.
            class_names (list[str]): A list of class names.
            pklfile_prefix (str | None): The prefix of the pkl file.
            submission_prefix (str | None): The prefix of the submission file.

        Returns:
            list[dict]: A list of dictionaries in the KITTI format.
        """
        assert len(net_outputs) == len(self.data_infos)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for i, bboxes_per_sample in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            anno = dict(
                name=[],
                truncated=[],
                occluded=[],
                alpha=[],
                bbox=[],
                dimensions=[],
                location=[],
                rotation_y=[],
                score=[])
            sample_idx = self.data_infos[i]['image']['image_idx']

            num_example = 0
            for label in range(len(bboxes_per_sample)):
                bbox = bboxes_per_sample[label]
                # `j` avoids shadowing the outer sample index `i`
                for j in range(bbox.shape[0]):
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
                    anno['alpha'].append(0.0)
                    anno['bbox'].append(bbox[j, :4])
                    # set dimensions (height, width, length) to zero
                    anno['dimensions'].append(
                        np.zeros(shape=[3], dtype=np.float32))
                    # set the 3D translation to (-1000, -1000, -1000)
                    anno['location'].append(
                        np.ones(shape=[3], dtype=np.float32) * (-1000.0))
                    anno['rotation_y'].append(0.0)
                    anno['score'].append(bbox[j, 4])
                    num_example += 1

            if num_example == 0:
                annos.append(
                    dict(
                        name=np.array([]),
                        truncated=np.array([]),
                        occluded=np.array([]),
                        alpha=np.array([]),
                        bbox=np.zeros([0, 4]),
                        dimensions=np.zeros([0, 3]),
                        location=np.zeros([0, 3]),
                        rotation_y=np.array([]),
                        score=np.array([]),
                    ))
            else:
                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * num_example, dtype=np.int64)
            det_annos += annos

        if pklfile_prefix is not None:
            # save file in pkl format; ensure the dumped file carries a
            # pickle extension so `mmcv.dump` can infer the file format
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmcv.dump(det_annos, out)

        if submission_prefix is not None:
            # save file in submission format
            mmcv.mkdir_or_exist(submission_prefix)
            print(f'Saving KITTI submission to {submission_prefix}')
            for i, anno in enumerate(det_annos):
                sample_idx = self.data_infos[i]['image']['image_idx']
                cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                with open(cur_det_file, 'w') as f:
                    bbox = anno['bbox']
                    loc = anno['location']
                    # dims are all-zero placeholders for 2D-only results;
                    # reorder (l, h, w) -> (h, w, l) for the KITTI format
                    dims = anno['dimensions'][:, [1, 2, 0]]
                    for idx in range(len(bbox)):
                        print(
                            '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f}'.format(
                                anno['name'][idx],
                                anno['alpha'][idx],
                                *bbox[idx],  # 4 float
                                *dims[idx],  # 3 float
                                *loc[idx],  # 3 float
                                anno['rotation_y'][idx],
                                anno['score'][idx]),
                            file=f,
                        )
            print('Result is saved to {}'.format(submission_prefix))

        return det_annos

    def convert_valid_bboxes(self, box_dict, info):
        """Convert predicted boxes into valid ones.

        The boxes are converted to the camera frame and projected onto the
        image; only predictions whose 2D projection overlaps the image and
        whose center lies inside the point cloud range are kept.
        """
        # TODO: refactor this function
        box_preds = box_dict['boxes_3d']
        scores = box_dict['scores_3d']
        labels = box_dict['labels_3d']
        sample_idx = info['image']['image_idx']
        # TODO: remove the hack of yaw
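        # the pi offset flips the predicted heading by 180 degrees so it
        # matches the rotation_y convention of the KITTI annotations;
        # limit_yaw then wraps the angle back into [-pi, pi)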
        box_preds.tensor[:, -1] = box_preds.tensor[:, -1] - np.pi
        box_preds.limit_yaw(offset=0.5, period=np.pi * 2)
        if len(box_preds) == 0:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx)

        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
        img_shape = info['image']['image_shape']
        P2 = box_preds.tensor.new_tensor(P2)

        box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c)

        box_corners = box_preds_camera.corners
        box_corners_in_image = points_cam2img(box_corners, P2)
        # box_corners_in_image: [N, 8, 2]
        minxy = torch.min(box_corners_in_image, dim=1)[0]
        maxxy = torch.max(box_corners_in_image, dim=1)[0]
        box_2d_preds = torch.cat([minxy, maxxy], dim=1)
        # Post-processing
        # check box_preds_camera: keep predictions whose projected 2D box
        # overlaps the image
        image_shape = box_preds.tensor.new_tensor(img_shape)
        valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &
                          (box_2d_preds[:, 1] < image_shape[0]) &
                          (box_2d_preds[:, 2] > 0) &
                          (box_2d_preds[:, 3] > 0))
        # check box_preds
        limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)
        valid_pcd_inds = ((box_preds.center > limit_range[:3]) &
                          (box_preds.center < limit_range[3:]))
        valid_inds = valid_cam_inds & valid_pcd_inds.all(-1)

        if valid_inds.sum() > 0:
            return dict(
                bbox=box_2d_preds[valid_inds, :].numpy(),
                box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),
                box3d_lidar=box_preds[valid_inds].tensor.numpy(),
                scores=scores[valid_inds].numpy(),
                label_preds=labels[valid_inds].numpy(),
                sample_idx=sample_idx,
            )
        else:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx,
            )

    def show(self, results, out_dir):
        """Save visualizations of the results to ``out_dir``.

        Args:
            results (list[dict]): List of prediction result dicts.
            out_dir (str): Output directory of visualization results.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['point_cloud']['velodyne_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.root_split, 'velodyne_reduced',
                         f'{file_name}.bin'),
                dtype=np.float32).reshape(-1, 4)
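            # convert points from the LiDAR frame to the depth frame used
            # by show_result: (x, y, z) -> (-y, x, z); the reflectance
            # channel is dropped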
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)