# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union

import mmengine
import numpy as np
import torch
from mmengine import load
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger, print_log

from mmdet3d.evaluation import kitti_eval
from mmdet3d.registry import METRICS
from mmdet3d.structures import (Box3DMode, CameraInstance3DBoxes,
                                LiDARInstance3DBoxes, points_cam2img)


@METRICS.register_module()
class KittiMetric(BaseMetric):
    """Kitti evaluation metric.

    Args:
        ann_file (str): Annotation file path.
        metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'.
        pcd_limit_range (List[float]): The range of point cloud used to filter
            invalid predicted boxes. Defaults to [0, -40, -3, 70.4, 40, 0.0].
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix will
            be used instead. Defaults to None.
        pklfile_prefix (str, optional): The prefix of pkl files, including the
            file path and the prefix of filename, e.g., "a/b/prefix". If not
            specified, a temp file will be created. Defaults to None.
        default_cam_key (str): The default camera for lidar to camera
            conversion. By default, KITTI: 'CAM2', Waymo: 'CAM_FRONT'.
            Defaults to 'CAM2'.
        format_only (bool): Format the output results without performing
            evaluation. It is useful when you want to format the result to a
            specific format and submit it to the test server.
            Defaults to False.
        submission_prefix (str, optional): The prefix of submission data. If
            not specified, the submission data will not be generated.
            Defaults to None.
        collect_device (str): Device name used for collecting results from
            different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        backend_args (dict, optional): Arguments to instantiate the
            corresponding backend. Defaults to None.
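
    Examples:
        A minimal evaluator config sketch for this metric (the ``ann_file``
        path below is only a placeholder for your own KITTI info file)::

            val_evaluator = dict(
                type='KittiMetric',
                ann_file='data/kitti/kitti_infos_val.pkl',
                metric='bbox')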
    """

    def __init__(self,
                 ann_file: str,
                 metric: Union[str, List[str]] = 'bbox',
                 pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0],
                 prefix: Optional[str] = None,
                 pklfile_prefix: Optional[str] = None,
                 default_cam_key: str = 'CAM2',
                 format_only: bool = False,
                 submission_prefix: Optional[str] = None,
                 collect_device: str = 'cpu',
                 backend_args: Optional[dict] = None) -> None:
        self.default_prefix = 'Kitti metric'
        super(KittiMetric, self).__init__(
            collect_device=collect_device, prefix=prefix)
        self.pcd_limit_range = pcd_limit_range
        self.ann_file = ann_file
        self.pklfile_prefix = pklfile_prefix
        self.format_only = format_only
        if self.format_only:
            assert submission_prefix is not None, (
                'submission_prefix must not be None when format_only is '
                'True, otherwise the result files will be saved to a temp '
                'directory which will be cleaned up at the end.')

        self.submission_prefix = submission_prefix
        self.default_cam_key = default_cam_key
        self.backend_args = backend_args

        allowed_metrics = ['bbox', 'img_bbox', 'mAP', 'LET_mAP']
        self.metrics = metric if isinstance(metric, list) else [metric]
        for metric in self.metrics:
            if metric not in allowed_metrics:
                raise KeyError("metric should be one of 'bbox', 'img_bbox', "
85
                               f'but got {metric}.')

    def convert_annos_to_kitti_annos(self, data_infos: dict) -> List[dict]:
        """Convert loading annotations to Kitti annotations.

        Args:
            data_infos (dict): Data infos including metainfo and annotations
                loaded from ann_file.

        Returns:
            List[dict]: List of Kitti annotations.
        """
        data_annos = data_infos['data_list']
        if not self.format_only:
            cat2label = data_infos['metainfo']['categories']
            label2cat = dict((v, k) for (k, v) in cat2label.items())
            assert 'instances' in data_annos[0]
            for i, annos in enumerate(data_annos):
                if len(annos['instances']) == 0:
                    kitti_annos = {
                        'name': np.array([]),
                        'truncated': np.array([]),
                        'occluded': np.array([]),
                        'alpha': np.array([]),
                        'bbox': np.zeros([0, 4]),
                        'dimensions': np.zeros([0, 3]),
                        'location': np.zeros([0, 3]),
                        'rotation_y': np.array([]),
                        'score': np.array([]),
                    }
                else:
                    kitti_annos = {
                        'name': [],
                        'truncated': [],
                        'occluded': [],
                        'alpha': [],
                        'bbox': [],
                        'location': [],
                        'dimensions': [],
                        'rotation_y': [],
                        'score': []
                    }
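                    # Each 'bbox_3d' stores (location, dimensions, yaw) of
                    # the box in the KITTI camera frame, so the first three
                    # values map to 'location', the next three to
                    # 'dimensions' and the last one to 'rotation_y'.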
                    for instance in annos['instances']:
                        label = instance['bbox_label']
                        kitti_annos['name'].append(label2cat[label])
                        kitti_annos['truncated'].append(instance['truncated'])
                        kitti_annos['occluded'].append(instance['occluded'])
                        kitti_annos['alpha'].append(instance['alpha'])
                        kitti_annos['bbox'].append(instance['bbox'])
                        kitti_annos['location'].append(instance['bbox_3d'][:3])
                        kitti_annos['dimensions'].append(
                            instance['bbox_3d'][3:6])
                        kitti_annos['rotation_y'].append(
                            instance['bbox_3d'][6])
                        kitti_annos['score'].append(instance['score'])
                    for name in kitti_annos:
                        kitti_annos[name] = np.array(kitti_annos[name])
                data_annos[i]['kitti_annos'] = kitti_annos
        return data_annos

    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of outputs from the model.
        """

        for data_sample in data_samples:
            result = dict()
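            # Move predictions to CPU so that results accumulated over the
            # whole dataset do not keep occupying GPU memory.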
            pred_3d = data_sample['pred_instances_3d']
            pred_2d = data_sample['pred_instances']
            for attr_name in pred_3d:
                pred_3d[attr_name] = pred_3d[attr_name].to('cpu')
            result['pred_instances_3d'] = pred_3d
            for attr_name in pred_2d:
                pred_2d[attr_name] = pred_2d[attr_name].to('cpu')
            result['pred_instances'] = pred_2d
            sample_idx = data_sample['sample_idx']
            result['sample_idx'] = sample_idx
            self.results.append(result)

    def compute_metrics(self, results: List[dict]) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (List[dict]): The processed results of the whole dataset.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()
        self.classes = self.dataset_meta['classes']

        # load annotations
        pkl_infos = load(self.ann_file, backend_args=self.backend_args)
        self.data_infos = self.convert_annos_to_kitti_annos(pkl_infos)
        result_dict, tmp_dir = self.format_results(
            results,
            pklfile_prefix=self.pklfile_prefix,
            submission_prefix=self.submission_prefix,
            classes=self.classes)

        metric_dict = {}

        if self.format_only:
            logger.info(
                f'results are saved in {osp.dirname(self.submission_prefix)}')
            return metric_dict

        gt_annos = [
            self.data_infos[result['sample_idx']]['kitti_annos']
            for result in results
        ]

        for metric in self.metrics:
            ap_dict = self.kitti_evaluate(
                result_dict,
                gt_annos,
                metric=metric,
                logger=logger,
                classes=self.classes)
            for result in ap_dict:
                metric_dict[result] = ap_dict[result]

        if tmp_dir is not None:
            tmp_dir.cleanup()
        return metric_dict

    def kitti_evaluate(self,
                       results_dict: dict,
                       gt_annos: List[dict],
                       metric: Optional[str] = None,
                       classes: Optional[List[str]] = None,
                       logger: Optional[MMLogger] = None) -> Dict[str, float]:
        """Evaluation in KITTI protocol.

        Args:
            results_dict (dict): Formatted results of the dataset.
            gt_annos (List[dict]): Contains GT information of each sample.
            metric (str, optional): Metrics to be evaluated. Defaults to None.
            classes (List[str], optional): A list of class names.
                Defaults to None.
            logger (MMLogger, optional): Logger used for printing related
                information during evaluation. Defaults to None.

        Returns:
            Dict[str, float]: Results of each evaluation metric.
        """
        ap_dict = dict()
        for name in results_dict:
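            # 2D predictions (and the image-only 'img_bbox' metric) are
            # evaluated with 2D bboxes only; 3D predictions are additionally
            # evaluated in BEV and full 3D.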
            if name == 'pred_instances' or metric == 'img_bbox':
                eval_types = ['bbox']
            else:
                eval_types = ['bbox', 'bev', '3d']
            ap_result_str, ap_dict_ = kitti_eval(
                gt_annos, results_dict[name], classes, eval_types=eval_types)
            for ap_type, ap in ap_dict_.items():
                ap_dict[f'{name}/{ap_type}'] = float(f'{ap:.4f}')

            print_log(f'Results of {name}:\n' + ap_result_str, logger=logger)

        return ap_dict

    def format_results(
        self,
        results: List[dict],
        pklfile_prefix: Optional[str] = None,
        submission_prefix: Optional[str] = None,
        classes: Optional[List[str]] = None
    ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]:
        """Format the results to pkl file.

        Args:
            results (List[dict]): Testing results of the dataset.
            pklfile_prefix (str, optional): The prefix of pkl files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Defaults to None.
            submission_prefix (str, optional): The prefix of submitted files.
                It includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Defaults to None.
            classes (List[str], optional): A list of class names.
                Defaults to None.

        Returns:
            tuple: (result_dict, tmp_dir), result_dict is a dict containing
            the formatted result, tmp_dir is the temporary directory created
            for saving pkl files when pklfile_prefix is not specified.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_dict = dict()
        sample_idx_list = [result['sample_idx'] for result in results]
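        # Each result holds 'pred_instances_3d' and 'pred_instances' entries;
        # 3D predictions are converted by bbox2result_kitti and 2D
        # predictions by bbox2result_kitti2d.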
        for name in results[0]:
            if submission_prefix is not None:
                submission_prefix_ = osp.join(submission_prefix, name)
            else:
                submission_prefix_ = None
            if pklfile_prefix is not None:
                pklfile_prefix_ = osp.join(pklfile_prefix, name) + '.pkl'
            else:
                pklfile_prefix_ = None
            if 'pred_instances' in name and '3d' in name and name[
                    0] != '_' and results[0][name]:
                net_outputs = [result[name] for result in results]
                result_list_ = self.bbox2result_kitti(net_outputs,
                                                      sample_idx_list, classes,
                                                      pklfile_prefix_,
                                                      submission_prefix_)
                result_dict[name] = result_list_
            elif name == 'pred_instances' and name[0] != '_' and results[0][
                    name]:
                net_outputs = [result[name] for result in results]
                result_list_ = self.bbox2result_kitti2d(
                    net_outputs, sample_idx_list, classes, pklfile_prefix_,
                    submission_prefix_)
                result_dict[name] = result_list_
        return result_dict, tmp_dir

    def bbox2result_kitti(
            self,
            net_outputs: List[dict],
            sample_idx_list: List[int],
            class_names: List[str],
            pklfile_prefix: Optional[str] = None,
            submission_prefix: Optional[str] = None) -> List[dict]:
        """Convert 3D detection results to kitti format for evaluation and test
        submission.

        Args:
            net_outputs (List[dict]): List of dict storing the predicted
                bounding boxes and scores.
            sample_idx_list (List[int]): List of input sample idx.
            class_names (List[str]): A list of class names.
            pklfile_prefix (str, optional): The prefix of pkl file.
                Defaults to None.
            submission_prefix (str, optional): The prefix of submission file.
                Defaults to None.

        Returns:
            List[dict]: A list of dictionaries with the kitti format.
        """
        assert len(net_outputs) == len(self.data_infos), \
            'invalid list length of network outputs'
        if submission_prefix is not None:
            mmengine.mkdir_or_exist(submission_prefix)

        det_annos = []
        print('\nConverting 3D prediction to KITTI format')
        for idx, pred_dicts in enumerate(
                mmengine.track_iter_progress(net_outputs)):
            sample_idx = sample_idx_list[idx]
            info = self.data_infos[sample_idx]
            # By default, 'CAM2' (``default_cam_key``) is used to compute
            # the metric. Change ``default_cam_key`` to use another camera.
            image_shape = (info['images'][self.default_cam_key]['height'],
                           info['images'][self.default_cam_key]['width'])
            box_dict = self.convert_valid_bboxes(pred_dicts, info)
            anno = {
                'name': [],
                'truncated': [],
                'occluded': [],
                'alpha': [],
                'bbox': [],
                'dimensions': [],
                'location': [],
                'rotation_y': [],
                'score': []
            }
            if len(box_dict['bbox']) > 0:
                box_2d_preds = box_dict['bbox']
                box_preds = box_dict['box3d_camera']
                scores = box_dict['scores']
                box_preds_lidar = box_dict['box3d_lidar']
                label_preds = box_dict['label_preds']
                pred_box_type_3d = box_dict['pred_box_type_3d']

                for box, box_lidar, bbox, score, label in zip(
                        box_preds, box_preds_lidar, box_2d_preds, scores,
                        label_preds):
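                    # Clip the projected 2D box to the image boundaries
                    # before converting it to the KITTI format.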
                    bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
                    bbox[:2] = np.maximum(bbox[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
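                    # alpha is the observation angle: the box yaw minus the
                    # azimuth of the box centre as seen from the camera.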
                    if pred_box_type_3d == CameraInstance3DBoxes:
                        anno['alpha'].append(-np.arctan2(box[0], box[2]) +
                                             box[6])
                    elif pred_box_type_3d == LiDARInstance3DBoxes:
                        anno['alpha'].append(
                            -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])
                    anno['bbox'].append(bbox)
                    anno['dimensions'].append(box[3:6])
                    anno['location'].append(box[:3])
                    anno['rotation_y'].append(box[6])
                    anno['score'].append(score)

                anno = {k: np.stack(v) for k, v in anno.items()}
            else:
                anno = {
                    'name': np.array([]),
                    'truncated': np.array([]),
                    'occluded': np.array([]),
                    'alpha': np.array([]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.array([]),
                    'score': np.array([]),
                }

            if submission_prefix is not None:
                curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                with open(curr_file, 'w') as f:
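                    # Each line follows the KITTI label format: type,
                    # truncation, occlusion, alpha, 2D bbox (4 values),
                    # dimensions (h, w, l), location (x, y, z), rotation_y
                    # and score.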
                    bbox = anno['bbox']
                    loc = anno['location']
                    dims = anno['dimensions']  # stored as (l, h, w)

                    for idx in range(len(bbox)):
                        print(
                            '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
                                anno['name'][idx], anno['alpha'][idx],
                                bbox[idx][0], bbox[idx][1], bbox[idx][2],
                                bbox[idx][3], dims[idx][1], dims[idx][2],
                                dims[idx][0], loc[idx][0], loc[idx][1],
                                loc[idx][2], anno['rotation_y'][idx],
                                anno['score'][idx]),
                            file=f)

            anno['sample_idx'] = np.array(
                [sample_idx] * len(anno['score']), dtype=np.int64)

            det_annos.append(anno)

        if pklfile_prefix is not None:
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmengine.dump(det_annos, out)
            print(f'Result is saved to {out}.')

        return det_annos

    def bbox2result_kitti2d(
            self,
            net_outputs: List[dict],
            sample_idx_list: List[int],
            class_names: List[str],
            pklfile_prefix: Optional[str] = None,
            submission_prefix: Optional[str] = None) -> List[dict]:
        """Convert 2D detection results to kitti format for evaluation and test
        submission.

        Args:
            net_outputs (List[dict]): List of dict storing the predicted
                bounding boxes and scores.
            sample_idx_list (List[int]): List of input sample idx.
            class_names (List[str]): A list of class names.
            pklfile_prefix (str, optional): The prefix of pkl file.
                Defaults to None.
            submission_prefix (str, optional): The prefix of submission file.
                Defaults to None.

        Returns:
            List[dict]: A list of dictionaries with the kitti format.
        """
        assert len(net_outputs) == len(self.data_infos), \
            'invalid list length of network outputs'
        det_annos = []
        print('\nConverting 2D prediction to KITTI format')
        for i, bboxes_per_sample in enumerate(
                mmengine.track_iter_progress(net_outputs)):
            anno = dict(
                name=[],
                truncated=[],
                occluded=[],
                alpha=[],
                bbox=[],
                dimensions=[],
                location=[],
                rotation_y=[],
                score=[])
            sample_idx = sample_idx_list[i]
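            # 2D-only predictions carry no 3D information, so the 3D fields
            # below are filled with placeholder values which the 2D bbox
            # evaluation ignores.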

            num_example = 0
            bbox = bboxes_per_sample['bboxes']
            for box_idx in range(bbox.shape[0]):
                anno['name'].append(class_names[int(
                    bboxes_per_sample['labels'][box_idx])])
                anno['truncated'].append(0.0)
                anno['occluded'].append(0)
                anno['alpha'].append(0.0)
                anno['bbox'].append(bbox[box_idx, :4])
                # set dimensions (height, width, length) to zero
                anno['dimensions'].append(
                    np.zeros(shape=[3], dtype=np.float32))
                # set the 3D translation to (-1000, -1000, -1000)
                anno['location'].append(
                    np.ones(shape=[3], dtype=np.float32) * (-1000.0))
                anno['rotation_y'].append(0.0)
                anno['score'].append(bboxes_per_sample['scores'][box_idx])
                num_example += 1

            if num_example == 0:
                anno = dict(
                    name=np.array([]),
                    truncated=np.array([]),
                    occluded=np.array([]),
                    alpha=np.array([]),
                    bbox=np.zeros([0, 4]),
                    dimensions=np.zeros([0, 3]),
                    location=np.zeros([0, 3]),
                    rotation_y=np.array([]),
                    score=np.array([]),
                )
            else:
                anno = {k: np.stack(v) for k, v in anno.items()}

            anno['sample_idx'] = np.array(
                [sample_idx] * num_example, dtype=np.int64)
            det_annos.append(anno)

        if pklfile_prefix is not None:
            if not pklfile_prefix.endswith(('.pkl', '.pickle')):
                out = f'{pklfile_prefix}.pkl'
            else:
                out = pklfile_prefix
            mmengine.dump(det_annos, out)
            print(f'Result is saved to {out}.')

        if submission_prefix is not None:
            # save file in submission format
            mmengine.mkdir_or_exist(submission_prefix)
            print(f'Saving KITTI submission to {submission_prefix}')
            for i, anno in enumerate(det_annos):
                sample_idx = sample_idx_list[i]
                cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt'
                with open(cur_det_file, 'w') as f:
                    bbox = anno['bbox']
                    loc = anno['location']
                    dims = anno['dimensions'][:, [1, 2, 0]]  # lhw -> hwl
                    for idx in range(len(bbox)):
                        print(
                            '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                            '{:.4f} {:.4f}'.format(
                                anno['name'][idx],
                                anno['alpha'][idx],
                                *bbox[idx],  # 4 float
                                *dims[idx],  # 3 float
                                *loc[idx],  # 3 float
                                anno['rotation_y'][idx],
                                anno['score'][idx]),
                            file=f,
                        )
            print(f'Result is saved to {submission_prefix}')

        return det_annos

    def convert_valid_bboxes(self, box_dict: dict, info: dict) -> dict:
        """Convert the predicted boxes into valid ones.

        Args:
            box_dict (dict): Box dictionaries to be converted.

                - bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bounding boxes.
                - scores_3d (Tensor): Scores of boxes.
                - labels_3d (Tensor): Class labels of boxes.
            info (dict): Data info.

        Returns:
            dict: Valid predicted boxes.

            - bbox (np.ndarray): 2D bounding boxes.
            - box3d_camera (np.ndarray): 3D bounding boxes in
              camera coordinate.
            - box3d_lidar (np.ndarray): 3D bounding boxes in
              LiDAR coordinate.
            - scores (np.ndarray): Scores of boxes.
            - label_preds (np.ndarray): Class label predictions.
            - sample_idx (int): Sample index.
        """
        # TODO: refactor this function
        box_preds = box_dict['bboxes_3d']
        scores = box_dict['scores_3d']
        labels = box_dict['labels_3d']
        sample_idx = info['sample_idx']
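        # Normalize the predicted yaw to a single period before converting
        # between coordinate frames.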
        box_preds.limit_yaw(offset=0.5, period=np.pi * 2)

        if len(box_preds) == 0:
            return dict(
                bbox=np.zeros([0, 4]),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx)
        # By default, 'CAM2' (``default_cam_key``) is used to compute the
        # metric. Change ``default_cam_key`` to use another camera.
        lidar2cam = np.array(
            info['images'][self.default_cam_key]['lidar2cam']).astype(
                np.float32)
        P2 = np.array(info['images'][self.default_cam_key]['cam2img']).astype(
            np.float32)
        img_shape = (info['images'][self.default_cam_key]['height'],
                     info['images'][self.default_cam_key]['width'])
        P2 = box_preds.tensor.new_tensor(P2)

        if isinstance(box_preds, LiDARInstance3DBoxes):
            box_preds_camera = box_preds.convert_to(Box3DMode.CAM, lidar2cam)
            box_preds_lidar = box_preds
        elif isinstance(box_preds, CameraInstance3DBoxes):
            box_preds_camera = box_preds
            box_preds_lidar = box_preds.convert_to(Box3DMode.LIDAR,
                                                   np.linalg.inv(lidar2cam))

        box_corners = box_preds_camera.corners
        box_corners_in_image = points_cam2img(box_corners, P2)
        # box_corners_in_image: [N, 8, 2]
        minxy = torch.min(box_corners_in_image, dim=1)[0]
        maxxy = torch.max(box_corners_in_image, dim=1)[0]
        box_2d_preds = torch.cat([minxy, maxxy], dim=1)
        # Post-processing
        # check box_preds_camera
        image_shape = box_preds.tensor.new_tensor(img_shape)
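        # Keep boxes whose projected 2D box overlaps the image: the top-left
        # corner lies within the image extent and the bottom-right corner
        # has positive coordinates.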
        valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &
                          (box_2d_preds[:, 1] < image_shape[0]) &
                          (box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0))
        # check box_preds_lidar
        if isinstance(box_preds, LiDARInstance3DBoxes):
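            # Additionally drop LiDAR boxes whose centre falls outside the
            # configured point cloud range (pcd_limit_range).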
            limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)
            valid_pcd_inds = ((box_preds_lidar.center > limit_range[:3]) &
                              (box_preds_lidar.center < limit_range[3:]))
            valid_inds = valid_cam_inds & valid_pcd_inds.all(-1)
        else:
            valid_inds = valid_cam_inds

        if valid_inds.sum() > 0:
            return dict(
                bbox=box_2d_preds[valid_inds, :].numpy(),
                pred_box_type_3d=type(box_preds),
                box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),
                box3d_lidar=box_preds_lidar[valid_inds].tensor.numpy(),
                scores=scores[valid_inds].numpy(),
                label_preds=labels[valid_inds].numpy(),
                sample_idx=sample_idx)
        else:
            return dict(
                bbox=np.zeros([0, 4]),
                pred_box_type_3d=type(box_preds),
                box3d_camera=np.zeros([0, 7]),
                box3d_lidar=np.zeros([0, 7]),
                scores=np.zeros([0]),
                label_preds=np.zeros([0]),
                sample_idx=sample_idx)