import copy
import os
import os.path as osp
import tempfile

import mmcv
import numpy as np
import torch
from mmcv.utils import print_log

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, CameraInstance3DBoxes, points_cam2img
from .custom_3d import Custom3DDataset


@DATASETS.register_module()
class KittiDataset(Custom3DDataset):
    """KITTI Dataset

    This class serves as the API for experiments on the KITTI Dataset.

    Please refer to `<http://www.cvlibs.net/datasets/kitti/eval_object.php?
    obj_benchmark=3d>`_for data downloading. It is recommended to symlink
    the dataset root to $MMDETECTION3D/data and organize them as the doc
    shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        split (str): Split of input data.
        pts_prefix (str, optional): Prefix of points files.
            Defaults to 'velodyne'.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include

            - 'LiDAR': box in LiDAR coordinates
            - 'Depth': box in depth coordinates, usually for indoor dataset
            - 'Camera': box in camera coordinates
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
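
    Example:
        A minimal instantiation sketch; the paths below are assumptions
        about a standard KITTI layout, not files shipped with this class::

            >>> from mmdet3d.datasets import KittiDataset
            >>> dataset = KittiDataset(
            ...     data_root='data/kitti/',
            ...     ann_file='data/kitti/kitti_infos_train.pkl',
            ...     split='training',
            ...     pts_prefix='velodyne_reduced')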
    """
    CLASSES = ('car', 'pedestrian', 'cyclist')

    def __init__(self,
                 data_root,
                 ann_file,
                 split,
                 pts_prefix='velodyne',
                 pipeline=None,
                 classes=None,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)

        self.root_split = os.path.join(self.data_root, split)
        assert self.modality is not None
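        # Point cloud range of the KITTI annotated area, used to filter
        # predictions: [x_min, y_min, z_min, x_max, y_max, z_max].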
        self.pcd_limit_range = [0, -40, -3, 70.4, 40, 0.0]
        self.pts_prefix = pts_prefix

    def _get_pts_filename(self, idx):
        """Get the point cloud filename of a given sample index."""
        pts_filename = osp.join(self.root_split, self.pts_prefix,
                                f'{idx:06d}.bin')
        return pts_filename

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data.

        Returns:
            dict: Standard input dict with the sample index, point cloud
                and image paths, the lidar-to-image projection matrix and,
                when not in test mode, the annotations.
        """
        info = self.data_infos[index]
        sample_idx = info['image']['image_idx']
        img_filename = os.path.join(self.data_root,
                                    info['image']['image_path'])

        # TODO: consider using torch.Tensor only
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
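        # Full LiDAR-to-image projection: Tr_velo_to_cam moves points into
        # the camera frame, R0_rect rectifies them, and P2 projects onto
        # the image plane of camera 2.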
        lidar2img = P2 @ rect @ Trv2c

        pts_filename = self._get_pts_filename(sample_idx)
        input_dict = dict(
            sample_idx=sample_idx,
            pts_filename=pts_filename,
            img_prefix=None,
            img_info=dict(filename=img_filename),
            lidar2img=lidar2img)

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Use index to get the annos, so the evalhook can also use this API.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information with 3D and 2D ground truth
                boxes, labels and names.
        """
        info = self.data_infos[index]
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)

        annos = info['annos']
        # we need other objects to avoid collision when sampling
        annos = self.remove_dontcare(annos)
        loc = annos['location']
        dims = annos['dimensions']
        rots = annos['rotation_y']
        gt_names = annos['name']
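        # Each row below is (x, y, z, l, h, w, rotation_y) in the KITTI
        # camera frame, the layout expected by CameraInstance3DBoxes.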
        gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1).astype(np.float32)

        # convert gt_bboxes_3d to velodyne coordinates
        gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
            self.box_mode_3d, np.linalg.inv(rect @ Trv2c))
        gt_bboxes = annos['bbox']

        selected = self.drop_arrays_by_name(gt_names, ['DontCare'])
        gt_bboxes = gt_bboxes[selected].astype('float32')
        gt_names = gt_names[selected]

        gt_labels = []
        for cat in gt_names:
            if cat in self.CLASSES:
                gt_labels.append(self.CLASSES.index(cat))
            else:
                gt_labels.append(-1)
        gt_labels = np.array(gt_labels)
        gt_labels_3d = copy.deepcopy(gt_labels)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            bboxes=gt_bboxes,
            labels=gt_labels,
            gt_names=gt_names)
        return anns_results

    def drop_arrays_by_name(self, gt_names, used_classes):
        """Return indices of boxes whose class is not in used_classes."""
        inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def keep_arrays_by_name(self, gt_names, used_classes):
        """Return indices of boxes whose class is in used_classes."""
        inds = [i for i, x in enumerate(gt_names) if x in used_classes]
        inds = np.array(inds, dtype=np.int64)
        return inds

    def remove_dontcare(self, ann_info):
        """Remove annotations of the 'DontCare' class."""
        img_filtered_annotations = {}
        relevant_annotation_indices = [
            i for i, x in enumerate(ann_info['name']) if x != 'DontCare'
        ]
        for key in ann_info.keys():
            img_filtered_annotations[key] = (
                ann_info[key][relevant_annotation_indices])
        return img_filtered_annotations

    def format_results(self,
                       outputs,
                       pklfile_prefix=None,
                       submission_prefix=None):
        """Format the results to pkl files and optional submission files.

        Args:
            outputs (list): Testing results of the dataset.
            pklfile_prefix (str | None): The prefix of pkl files. If not
                specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submission files.
                Default: None.

        Returns:
            tuple: (result_files, tmp_dir). ``result_files`` holds the
                results in KITTI format; ``tmp_dir`` is the temporary
                directory created when ``pklfile_prefix`` is not specified.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

        if not isinstance(outputs[0], dict):
            result_files = self.bbox2result_kitti2d(outputs, self.CLASSES,
                                                    pklfile_prefix,
                                                    submission_prefix)
        elif 'pts_bbox' in outputs[0] or 'img_bbox' in outputs[0]:
            result_files = dict()
            for name in outputs[0]:
                results_ = [out[name] for out in outputs]
                pklfile_prefix_ = pklfile_prefix + name
                if submission_prefix is not None:
                    submission_prefix_ = submission_prefix + name
                else:
                    submission_prefix_ = None
                if 'img' in name:
                    result_files_ = self.bbox2result_kitti2d(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                else:
                    result_files_ = self.bbox2result_kitti(
                        results_, self.CLASSES, pklfile_prefix_,
                        submission_prefix_)
                result_files[name] = result_files_
        else:
            result_files = self.bbox2result_kitti(outputs, self.CLASSES,
                                                  pklfile_prefix,
                                                  submission_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 logger=None,
                 pklfile_prefix=None,
                 submission_prefix=None,
                 show=False,
                 out_dir=None):
        """Evaluation in KITTI protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submission data.
                If not specified, the submission data will not be generated.
            show (bool): Whether to visualize.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir = self.format_results(results, pklfile_prefix)
        from mmdet3d.core.evaluation import kitti_eval
        gt_annos = [info['annos'] for info in self.data_infos]

        if isinstance(result_files, dict):
            ap_dict = dict()
            for name, result_files_ in result_files.items():
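                # Image-branch results only support 2D bbox AP; LiDAR
                # results are additionally evaluated with BEV and 3D AP.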
                eval_types = ['bbox', 'bev', '3d']
                if 'img' in name:
                    eval_types = ['bbox']
                ap_result_str, ap_dict_ = kitti_eval(
                    gt_annos,
                    result_files_,
                    self.CLASSES,
                    eval_types=eval_types)
                for ap_type, ap in ap_dict_.items():
                    ap_dict[f'{name}/{ap_type}'] = float('{:.4f}'.format(ap))

                print_log(
                    f'Results of {name}:\n' + ap_result_str, logger=logger)

        else:
            if metric == 'img_bbox':
                ap_result_str, ap_dict = kitti_eval(
                    gt_annos, result_files, self.CLASSES, eval_types=['bbox'])
            else:
                ap_result_str, ap_dict = kitti_eval(gt_annos, result_files,
                                                    self.CLASSES)
            print_log('\n' + ap_result_str, logger=logger)

        if tmp_dir is not None:
            tmp_dir.cleanup()
        if show:
            self.show(results, out_dir)
        return ap_dict

    def bbox2result_kitti(self,
                          net_outputs,
                          class_names,
                          pklfile_prefix=None,
                          submission_prefix=None):
        """Convert 3D detection results to KITTI format for evaluation and
        test submission.

        Args:
            net_outputs (list[dict]): List of dicts storing the predicted
                boxes, scores and labels for each sample.
            class_names (list[str]): A list of class names.
            pklfile_prefix (str | None): The prefix of pkl file.
            submission_prefix (str | None): The prefix of submission file.

        Returns:
            list[dict]: A list of dicts in the KITTI format.
        """
        assert len(net_outputs) == len(self.data_infos)
        if submission_prefix is not None:
            mmcv.mkdir_or_exist(submission_prefix)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for idx, pred_dicts in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            info = self.data_infos[idx]
            sample_idx = info['image']['image_idx']
            image_shape = info['image']['image_shape'][:2]

            box_dict = self.convert_valid_bboxes(pred_dicts, info)
            if len(box_dict['bbox']) > 0:
                box_2d_preds = box_dict['bbox']
                box_preds = box_dict['box3d_camera']
                scores = box_dict['scores']
                box_preds_lidar = box_dict['box3d_lidar']
                label_preds = box_dict['label_preds']

                anno = {
                    'name': [],
                    'truncated': [],
                    'occluded': [],
                    'alpha': [],
                    'bbox': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'score': []
                }

                for box, box_lidar, bbox, score, label in zip(
                        box_preds, box_preds_lidar, box_2d_preds, scores,
                        label_preds):
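                    # Clip the projected 2D box to the image boundary;
                    # image_shape is (H, W), so reverse it to match (x, y).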
                    bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
                    bbox[:2] = np.maximum(bbox[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
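                    # KITTI's alpha is the observation angle: the global
                    # rotation_y minus the azimuth of the box center as seen
                    # from the camera, recovered here from the LiDAR-frame
                    # center (x forward, y left).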
                    anno['alpha'].append(
                        -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])
                    anno['bbox'].append(bbox)
                    anno['dimensions'].append(box[3:6])
                    anno['location'].append(box[:3])
                    anno['rotation_y'].append(box[6])
                    anno['score'].append(score)

                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

                if submission_prefix is not None:
                    curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'
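                    # One file per sample, each line in the KITTI label
                    # format: type, truncated, occluded, alpha, 2D bbox (4),
                    # dimensions hwl (3), location (3), rotation_y, score.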
                    with open(curr_file, 'w') as f:
                        bbox = anno['bbox']
                        loc = anno['location']
                        dims = anno['dimensions']  # lhw; written as hwl below

                        for idx in range(len(bbox)):
                            print(
                                '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} '
                                '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.
                                format(anno['name'][idx], anno['alpha'][idx],
                                       bbox[idx][0], bbox[idx][1],
                                       bbox[idx][2], bbox[idx][3],
                                       dims[idx][1], dims[idx][2],
                                       dims[idx][0], loc[idx][0], loc[idx][1],
                                       loc[idx][2], anno['rotation_y'][idx],
                                       anno['score'][idx]),
                                file=f)
            else:
                annos.append({
                    'name': np.array([]),
                    'truncated': np.array([]),
                    'occluded': np.array([]),
                    'alpha': np.array([]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.array([]),
                    'score': np.array([]),
                })
            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * len(annos[-1]['score']), dtype=np.int64)

            det_annos += annos

        if pklfile_prefix is not None:
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmcv.dump(det_annos, out)
            print('Result is saved to %s' % out)

        return det_annos

    def bbox2result_kitti2d(self,
                            net_outputs,
                            class_names,
                            pklfile_prefix=None,
                            submission_prefix=None):
        """Convert results to kitti format for evaluation and test submission.

        Args:
            net_outputs (list[list[np.ndarray]]): For each sample, a list
                of per-class arrays storing the 2D bboxes and scores.
            class_names (list[str]): A list of class names.
            pklfile_prefix (str | None): The prefix of pkl file.
            submission_prefix (str | None): The prefix of submission file.

        Returns:
            list[dict]: A list of dicts in the KITTI format.
        """
        assert len(net_outputs) == len(self.data_infos)

        det_annos = []
        print('\nConverting prediction to KITTI format')
        for i, bboxes_per_sample in enumerate(
                mmcv.track_iter_progress(net_outputs)):
            annos = []
            anno = dict(
                name=[],
                truncated=[],
                occluded=[],
                alpha=[],
                bbox=[],
                dimensions=[],
                location=[],
                rotation_y=[],
                score=[])
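            # Flatten this sample's per-class 2D detections into a single
            # KITTI-style annotation dict.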
            sample_idx = self.data_infos[i]['image']['image_idx']

            num_example = 0
            for label in range(len(bboxes_per_sample)):
                bbox = bboxes_per_sample[label]
                for i in range(bbox.shape[0]):
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
                    anno['alpha'].append(0.0)
                    anno['bbox'].append(bbox[i, :4])
                    # set dimensions (height, width, length) to zero
                    anno['dimensions'].append(
                        np.zeros(shape=[3], dtype=np.float32))
                    # set the 3D translation to (-1000, -1000, -1000)
                    anno['location'].append(
                        np.ones(shape=[3], dtype=np.float32) * (-1000.0))
                    anno['rotation_y'].append(0.0)
                    anno['score'].append(bbox[i, 4])
                    num_example += 1

            if num_example == 0:
                annos.append(
                    dict(
                        name=np.array([]),
                        truncated=np.array([]),
                        occluded=np.array([]),
                        alpha=np.array([]),
                        bbox=np.zeros([0, 4]),
                        dimensions=np.zeros([0, 3]),
                        location=np.zeros([0, 3]),
                        rotation_y=np.array([]),
                        score=np.array([]),
                    ))
            else:
                anno = {k: np.stack(v) for k, v in anno.items()}
                annos.append(anno)

            annos[-1]['sample_idx'] = np.array(
                [sample_idx] * num_example, dtype=np.int64)
            det_annos += annos

        if pklfile_prefix is not None:
            # save file in pkl format
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                pklfile_path = f'{pklfile_prefix}.pkl'
            else:
                pklfile_path = pklfile_prefix
            mmcv.dump(det_annos, pklfile_path)

        if submission_prefix is not None:
            # save file in submission format
            mmcv.mkdir_or_exist(submission_prefix)
            print(f'Saving KITTI submission to {submission_prefix}')
            for i, anno in enumerate(det_annos):
                sample_idx = self.data_infos[i]['image']['image_idx']
                cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                with open(cur_det_file, 'w') as f:
                    bbox = anno['bbox']
                    loc = anno['location']
                    dims = anno['dimensions'][:, [1, 2, 0]]  # lhw -> hwl
                    for idx in range(len(bbox)):
                        print(
                            '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f}'.format(
                                anno['name'][idx],
                                anno['alpha'][idx],
                                *bbox[idx],  # 4 float
                                *dims[idx],  # 3 float
                                *loc[idx],  # 3 float
                                anno['rotation_y'][idx],
                                anno['score'][idx]),
                            file=f,
                        )
            print('Result is saved to {}'.format(submission_prefix))

        return det_annos

    def convert_valid_bboxes(self, box_dict, info):
        """Keep only the predicted boxes that are valid for a sample.

        A box is valid if it projects into the image and its center lies
        inside the point cloud range.

        Args:
            box_dict (dict): Predictions with 'boxes_3d', 'scores_3d'
                and 'labels_3d'.
            info (dict): Data info of the corresponding sample.

        Returns:
            dict: Valid predictions as 'bbox', 'box3d_camera',
                'box3d_lidar', 'scores', 'label_preds' and 'sample_idx'.
        """
        # TODO: refactor this function
        box_preds = box_dict['boxes_3d']
        scores = box_dict['scores_3d']
        labels = box_dict['labels_3d']
        sample_idx = info['image']['image_idx']
        # TODO: remove the hack of yaw
        box_preds.tensor[:, -1] = box_preds.tensor[:, -1] - np.pi
        box_preds.limit_yaw(offset=0.5, period=np.pi * 2)

        if len(box_preds) == 0:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx)

        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
        img_shape = info['image']['image_shape']
        P2 = box_preds.tensor.new_tensor(P2)

        box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c)

        box_corners = box_preds_camera.corners
        box_corners_in_image = points_cam2img(box_corners, P2)
        # box_corners_in_image: [N, 8, 2]
        minxy = torch.min(box_corners_in_image, dim=1)[0]
        maxxy = torch.max(box_corners_in_image, dim=1)[0]
        box_2d_preds = torch.cat([minxy, maxxy], dim=1)
        # Post-processing
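        # Two validity checks follow: the projected 2D box must overlap the
        # image, and the LiDAR box center must lie within pcd_limit_range
        # (the area KITTI annotates).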
        # check box_2d_preds
        image_shape = box_preds.tensor.new_tensor(img_shape)
        valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &
                          (box_2d_preds[:, 1] < image_shape[0]) &
                          (box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0))
        # check box_preds
        limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)
        valid_pcd_inds = ((box_preds.center > limit_range[:3]) &
                          (box_preds.center < limit_range[3:]))
        valid_inds = valid_cam_inds & valid_pcd_inds.all(-1)

        if valid_inds.sum() > 0:
            return dict(
                bbox=box_2d_preds[valid_inds, :].numpy(),
                box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),
                box3d_lidar=box_preds[valid_inds].tensor.numpy(),
                scores=scores[valid_inds].numpy(),
                label_preds=labels[valid_inds].numpy(),
                sample_idx=sample_idx,
            )
        else:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx,
            )

    def show(self, results, out_dir):
        """Visualize the ground truths and predictions in 3D.

        Args:
            results (list[dict]): List of prediction dicts for each sample.
            out_dir (str): Output directory of visualization results.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['point_cloud']['velodyne_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.root_split, 'velodyne_reduced',
                         f'{file_name}.bin'),
                dtype=np.float32).reshape(-1, 4)
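            # Convert LiDAR coordinates (x forward, y left) to the depth
            # convention used by show_result (x right, y forward), dropping
            # the intensity channel.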
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
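            # Move the box origin from the bottom center to the gravity
            # center for display; the same shift is applied to predictions.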
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)