# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union

import mmengine
import numpy as np
import pyquaternion
import torch
from mmengine import Config, load
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.data_classes import DetectionConfig
from nuscenes.utils.data_classes import Box as NuScenesBox

from mmdet3d.models.layers import box3d_multiclass_nms
from mmdet3d.registry import METRICS
from mmdet3d.structures import (CameraInstance3DBoxes, LiDARInstance3DBoxes,
                                bbox3d2result, xywhr2xyxyr)


@METRICS.register_module()
class NuScenesMetric(BaseMetric):
    """Nuscenes evaluation metric.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        metric (str or List[str]): Metrics to be evaluated.
            Defaults to 'bbox'.
        modality (dict): Modality to specify the sensor data used
            as input. Defaults to dict(use_camera=False, use_lidar=True).
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
        format_only (bool): Format the output results without perform
            evaluation. It is useful when you want to format the result
            to a specific format and submit it to the test server.
            Defaults to False.
        jsonfile_prefix (str, optional): The prefix of json files including
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Defaults to None.
        eval_version (str): Configuration version of evaluation.
            Defaults to 'detection_cvpr_2019'.
        collect_device (str): Device name used for collecting results
            from different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        backend_args (dict, optional): Arguments to instantiate the
            corresponding backend. Defaults to None.
    """
    # Maps raw nuScenes category names to the 10 detection class names
    # used by the official evaluation protocol.
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Fallback attribute per detection class, used when a prediction carries
    # no attribute or an attribute inconsistent with its class.
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    # Short metric names used in the printed/reported summary.
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }

90
91
92
93
94
95
96
97
98
99
100
    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 metric: Union[str, List[str]] = 'bbox',
                 modality: dict = dict(use_camera=False, use_lidar=True),
                 prefix: Optional[str] = None,
                 format_only: bool = False,
                 jsonfile_prefix: Optional[str] = None,
                 eval_version: str = 'detection_cvpr_2019',
                 collect_device: str = 'cpu',
                 backend_args: Optional[dict] = None) -> None:
        self.default_prefix = 'NuScenes metric'
        super(NuScenesMetric, self).__init__(
            collect_device=collect_device, prefix=prefix)
        if modality is None:
            modality = dict(
                use_camera=False,
                use_lidar=True,
            )
        self.ann_file = ann_file
        self.data_root = data_root
        self.modality = modality
        self.format_only = format_only
        if self.format_only:
            # BUGFIX: the message was previously split into separate
            # stand-alone string statements, so only its first fragment was
            # attached to the assert; join it into one parenthesized string.
            assert jsonfile_prefix is not None, (
                'jsonfile_prefix must be not None when format_only is True, '
                'otherwise the result files will be saved to a temp '
                'directory which will be cleanup at the end.')

        self.jsonfile_prefix = jsonfile_prefix
        self.backend_args = backend_args

        self.metrics = metric if isinstance(metric, list) else [metric]

        self.eval_version = eval_version
        self.eval_detection_configs = config_factory(self.eval_version)

127
    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results should be stored in ``self.results``,
        which will be used to compute the metrics when all batches
        have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of outputs from
                the model.
        """
        for data_sample in data_samples:
            result = dict()
            pred_3d = data_sample['pred_instances_3d']
            pred_2d = data_sample['pred_instances']
            # Move every predicted field to CPU so results can be collected
            # across ranks and accumulated without holding GPU memory.
            for attr_name in pred_3d:
                pred_3d[attr_name] = pred_3d[attr_name].to('cpu')
            result['pred_instances_3d'] = pred_3d
            for attr_name in pred_2d:
                pred_2d[attr_name] = pred_2d[attr_name].to('cpu')
            result['pred_instances'] = pred_2d
            result['sample_idx'] = data_sample['sample_idx']
            self.results.append(result)
VVsssssk's avatar
VVsssssk committed
152

153
    def compute_metrics(self, results: List[dict]) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (List[dict]): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()

        classes = self.dataset_meta['classes']
        self.version = self.dataset_meta['version']
        # load annotations
        self.data_infos = load(
            self.ann_file, backend_args=self.backend_args)['data_list']
        result_dict, tmp_dir = self.format_results(results, classes,
                                                   self.jsonfile_prefix)

        metric_dict = {}

        # In format-only mode the submission files are the product; skip
        # the nuScenes evaluation entirely.
        if self.format_only:
            logger.info('results are saved in '
                        f'{osp.basename(self.jsonfile_prefix)}')
            return metric_dict

        for metric in self.metrics:
            ap_dict = self.nus_evaluate(
                result_dict, classes=classes, metric=metric, logger=logger)
            for result in ap_dict:
                metric_dict[result] = ap_dict[result]

        # Remove the temporary directory holding intermediate json files.
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return metric_dict

    def nus_evaluate(self,
                     result_dict: dict,
                     metric: str = 'bbox',
                     classes: Optional[List[str]] = None,
                     logger: Optional[MMLogger] = None) -> Dict[str, float]:
        """Evaluation in Nuscenes protocol.

        Args:
            result_dict (dict): Formatted results of the dataset.
            metric (str): Metrics to be evaluated. Defaults to 'bbox'.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            logger (MMLogger, optional): Logger used for printing
                related information during evaluation. Defaults to None.

        Returns:
            Dict[str, float]: Results of each evaluation metric.
        """
        metric_dict = dict()
        for name in result_dict:
            print(f'Evaluating bboxes of {name}')
            ret_dict = self._evaluate_single(
                result_dict[name], classes=classes, result_name=name)
            # BUGFIX: merge inside the loop — previously only the last
            # result name's metrics were kept, and an empty ``result_dict``
            # raised NameError on ``ret_dict``.
            metric_dict.update(ret_dict)
        return metric_dict

216
217
218
219
220
    def _evaluate_single(
            self,
            result_path: str,
            classes: Optional[List[str]] = None,
            result_name: str = 'pred_instances_3d') -> Dict[str, float]:
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            result_name (str): Result name in the metric prefix.
                Defaults to 'pred_instances_3d'.

        Returns:
            Dict[str, float]: Dictionary of evaluation details.
        """
        # Imported lazily so the heavy nuScenes devkit is only required
        # when an actual evaluation is run.
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root, verbose=False)
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        # Writes metrics_summary.json (among others) into output_dir.
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmengine.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in classes:
            for k, v in metrics['label_aps'][name].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{name}_AP_dist_{k}'] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{name}_{k}'] = val
            for k, v in metrics['tp_errors'].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{self.ErrNameMapping[k]}'] = val

        detail[f'{metric_prefix}/NDS'] = metrics['nd_score']
        detail[f'{metric_prefix}/mAP'] = metrics['mean_ap']
        return detail

271
272
273
274
275
276
    def format_results(
        self,
        results: List[dict],
        classes: Optional[List[str]] = None,
        jsonfile_prefix: Optional[str] = None
    ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]:
        """Format the mmdet3d results to standard NuScenes json file.

        Args:
            results (List[dict]): Testing results of the dataset.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of json files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Defaults to None.

        Returns:
            tuple: Returns (result_dict, tmp_dir), where `result_dict` is a
            dict containing the json filepaths, `tmp_dir` is the temporal
            directory created for saving json files when
            `jsonfile_prefix` is not specified.
        """
        assert isinstance(results, list), 'results must be a list'

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_dict = dict()
        # Guard against an empty result list, which would otherwise raise
        # IndexError on ``results[0]`` below.
        if not results:
            return result_dict, tmp_dir
        sample_idx_list = [result['sample_idx'] for result in results]

        # Format each prediction head separately (e.g. 'pred_instances_3d');
        # skip non-3d and private entries.
        for name in results[0]:
            if 'pred' in name and '3d' in name and name[0] != '_':
                print(f'\nFormating bboxes of {name}')
                results_ = [out[name] for out in results]
                tmp_file_ = osp.join(jsonfile_prefix, name)
                # Dispatch on the 3D box type of the first result.
                box_type_3d = type(results_[0]['bboxes_3d'])
                if box_type_3d == LiDARInstance3DBoxes:
                    result_dict[name] = self._format_lidar_bbox(
                        results_, sample_idx_list, classes, tmp_file_)
                elif box_type_3d == CameraInstance3DBoxes:
                    result_dict[name] = self._format_camera_bbox(
                        results_, sample_idx_list, classes, tmp_file_)

        return result_dict, tmp_dir

319
    def get_attr_name(self, attr_idx: int, label_name: str) -> str:
        """Get attribute from predicted index.

        This is a workaround to predict attribute when the predicted velocity
        is not reliable. We map the predicted attribute index to the one
        in the attribute set. If it is consistent with the category, we will
        keep it. Otherwise, we will use the default attribute.

        Args:
            attr_idx (int): Attribute index.
            label_name (str): Predicted category name.

        Returns:
            str: Predicted attribute name.
        """
        # TODO: Simplify the variable name
        AttrMapping_rev2 = [
            'cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving',
            'pedestrian.standing', 'pedestrian.sitting_lying_down',
            'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None'
        ]
        attr = AttrMapping_rev2[attr_idx]
        # Attributes that are consistent with each category; a predicted
        # attribute outside its category's set falls back to the default.
        vehicle_attrs = ('vehicle.moving', 'vehicle.parked', 'vehicle.stopped')
        pedestrian_attrs = ('pedestrian.moving', 'pedestrian.standing',
                            'pedestrian.sitting_lying_down')
        cycle_attrs = ('cycle.with_rider', 'cycle.without_rider')
        valid_attrs = {
            'car': vehicle_attrs,
            'bus': vehicle_attrs,
            'truck': vehicle_attrs,
            'trailer': vehicle_attrs,
            'construction_vehicle': vehicle_attrs,
            'pedestrian': pedestrian_attrs,
            'bicycle': cycle_attrs,
            'motorcycle': cycle_attrs,
        }
        if attr in valid_attrs.get(label_name, ()):
            return attr
        # Unknown categories ('barrier', 'traffic_cone', ...) and
        # inconsistent predictions use the per-class default.
        return self.DefaultAttribute[label_name]

ZCMax's avatar
ZCMax committed
366
367
    def _format_camera_bbox(self,
                            results: List[dict],
                            sample_idx_list: List[int],
                            classes: Optional[List[str]] = None,
                            jsonfile_prefix: Optional[str] = None) -> str:
        """Convert camera-based results to the standard nuScenes format.

        Predictions from the six camera images of one frame are merged,
        deduplicated with a 3D multi-class NMS, and written to a nuScenes
        submission json file.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')

        # Camera types in Nuscenes datasets
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]

        CAM_NUM = 6

        for i, det in enumerate(mmengine.track_iter_progress(results)):

            sample_idx = sample_idx_list[i]

            # Each frame contributes CAM_NUM consecutive results, one per
            # camera: recover the frame index and the camera id.
            frame_sample_idx = sample_idx // CAM_NUM
            camera_type_id = sample_idx % CAM_NUM

            # Start accumulating a new frame on its first camera.
            if camera_type_id == 0:
                boxes_per_frame = []
                attrs_per_frame = []

            # need to merge results from images of the same sample
            annos = []
            boxes, attrs = output_to_nusc_box(det)
            sample_token = self.data_infos[frame_sample_idx]['token']
            camera_type = camera_types[camera_type_id]
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs, camera_type)
            boxes_per_frame.extend(boxes)
            attrs_per_frame.extend(attrs)
            # Remove redundant predictions caused by overlap of images
            if (sample_idx + 1) % CAM_NUM != 0:
                continue
            boxes = global_nusc_box_to_cam(self.data_infos[frame_sample_idx],
                                           boxes_per_frame, classes,
                                           self.eval_detection_configs)
            cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes)
            # box nms 3d over 6 images in a frame
            # TODO: move this global setting into config
            nms_cfg = dict(
                use_rotate_nms=True,
                nms_across_levels=False,
                nms_pre=4096,
                nms_thr=0.05,
                score_thr=0.01,
                min_bbox_size=0,
                max_per_frame=500)
            nms_cfg = Config(nms_cfg)
            cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev)
            boxes3d = cam_boxes3d.tensor
            # generate attr scores from attr labels
            attrs = labels.new_tensor(attrs_per_frame)
            boxes3d, scores, labels, attrs = box3d_multiclass_nms(
                boxes3d,
                cam_boxes3d_for_nms,
                scores,
                nms_cfg.score_thr,
                nms_cfg.max_per_frame,
                nms_cfg,
                mlvl_attr_scores=attrs)
            cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9)
            det = bbox3d2result(cam_boxes3d, scores, labels, attrs)
            boxes, attrs = output_to_nusc_box(det)
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs)

            # ``box_idx`` (not ``i``) avoids shadowing the outer loop index.
            for box_idx, box in enumerate(boxes):
                name = classes[box.label]
                attr = self.get_attr_name(attrs[box_idx], name)
                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            # other views results of the same frame should be concatenated
            if sample_token in nusc_annos:
                nusc_annos[sample_token].extend(annos)
            else:
                nusc_annos[sample_token] = annos

        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path

    def _format_lidar_bbox(self,
                           results: List[dict],
                           sample_idx_list: List[int],
                           classes: Optional[List[str]] = None,
                           jsonfile_prefix: Optional[str] = None) -> str:
        """Convert the results to the standard format.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')
        for i, det in enumerate(mmengine.track_iter_progress(results)):
            annos = []
            boxes, attrs = output_to_nusc_box(det)
            sample_idx = sample_idx_list[i]
            sample_token = self.data_infos[sample_idx]['token']
            boxes = lidar_nusc_box_to_global(self.data_infos[sample_idx],
                                             boxes, classes,
                                             self.eval_detection_configs)
            # ``for box in boxes`` (not ``for i, box in enumerate(...)``)
            # avoids shadowing the outer loop index.
            for box in boxes:
                name = classes[box.label]
                # Attribute heuristic: a box moving faster than 0.2 m/s is
                # labeled "moving"; otherwise use a stationary attribute.
                if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
                    if name in [
                            'car',
                            'construction_vehicle',
                            'bus',
                            'truck',
                            'trailer',
                    ]:
                        attr = 'vehicle.moving'
                    elif name in ['bicycle', 'motorcycle']:
                        attr = 'cycle.with_rider'
                    else:
                        attr = self.DefaultAttribute[name]
                else:
                    if name in ['pedestrian']:
                        attr = 'pedestrian.standing'
                    elif name in ['bus']:
                        attr = 'vehicle.stopped'
                    else:
                        attr = self.DefaultAttribute[name]

                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }
        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path


564
565
def output_to_nusc_box(
        detection: dict) -> Tuple[List[NuScenesBox], Union[np.ndarray, None]]:
    """Convert the output to the box class in the nuScenes.

    Args:
        detection (dict): Detection results.

            - bboxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]:
        List of standard NuScenesBoxes and attribute labels.

    Raises:
        NotImplementedError: If the box type is neither
            ``LiDARInstance3DBoxes`` nor ``CameraInstance3DBoxes``.
    """
    bbox3d = detection['bboxes_3d']
    scores = detection['scores_3d'].numpy()
    labels = detection['labels_3d'].numpy()
    # Attribute labels are optional (camera-based detectors predict them).
    attrs = None
    if 'attr_labels' in detection:
        attrs = detection['attr_labels'].numpy()

    box_gravity_center = bbox3d.gravity_center.numpy()
    box_dims = bbox3d.dims.numpy()
    box_yaw = bbox3d.yaw.numpy()

    box_list = []

    if isinstance(bbox3d, LiDARInstance3DBoxes):
        # our LiDAR coordinate system -> nuScenes box coordinate system
        # (swap length/width so dims become (w, l, h))
        nus_box_dims = box_dims[:, [1, 0, 2]]
        for i in range(len(bbox3d)):
            quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
            # Columns 7:9 of the box tensor hold the (vx, vy) velocity.
            velocity = (*bbox3d.tensor[i, 7:9], 0.0)
            box = NuScenesBox(
                box_gravity_center[i],
                nus_box_dims[i],
                quat,
                label=labels[i],
                score=scores[i],
                velocity=velocity)
            box_list.append(box)
    elif isinstance(bbox3d, CameraInstance3DBoxes):
        # our Camera coordinate system -> nuScenes box coordinate system
        # convert the dim/rot to nuscbox convention
        nus_box_dims = box_dims[:, [2, 0, 1]]
        nus_box_yaw = -box_yaw
        for i in range(len(bbox3d)):
            q1 = pyquaternion.Quaternion(
                axis=[0, 0, 1], radians=nus_box_yaw[i])
            q2 = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2)
            quat = q2 * q1
            # Camera-frame velocity: (vx, 0, vz) in nuScenes box order.
            velocity = (bbox3d.tensor[i, 7], 0.0, bbox3d.tensor[i, 8])
            box = NuScenesBox(
                box_gravity_center[i],
                nus_box_dims[i],
                quat,
                label=labels[i],
                score=scores[i],
                velocity=velocity)
            box_list.append(box)
    else:
        raise NotImplementedError(
            f'Do not support convert {type(bbox3d)} bboxes '
            'to standard NuScenesBoxes.')

    return box_list, attrs
VVsssssk's avatar
VVsssssk committed
635
636
637
638
639
640
641
642
643
644


def lidar_nusc_box_to_global(
        info: dict, boxes: List[NuScenesBox], classes: List[str],
        eval_configs: DetectionConfig) -> List[NuScenesBox]:
    """Convert the box from ego to global coordinate.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.

    Returns:
        List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in the
        global coordinate.
    """
    # The calibration matrices, their rotation quaternions and the
    # per-class range map are identical for every box of this sample,
    # so build them once instead of per box.
    lidar2ego = np.array(info['lidar_points']['lidar2ego'])
    lidar2ego_rot = pyquaternion.Quaternion(
        matrix=lidar2ego, rtol=1e-05, atol=1e-07)
    ego2global = np.array(info['ego2global'])
    ego2global_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07)
    cls_range_map = eval_configs.class_range

    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.rotate(lidar2ego_rot)
        box.translate(lidar2ego[:3, 3])
        # Filter detections beyond the per-class evaluation range,
        # measured as ground-plane distance in the ego frame.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(ego2global_rot)
        box.translate(ego2global[:3, 3])
        box_list.append(box)
    return box_list
ZCMax's avatar
ZCMax committed
673
674


ChaimZhu's avatar
ChaimZhu committed
675
676
677
def cam_nusc_box_to_global(
    info: dict,
    boxes: List[NuScenesBox],
    attrs: np.ndarray,
    classes: List[str],
    eval_configs: DetectionConfig,
    camera_type: str = 'CAM_FRONT',
) -> Tuple[List[NuScenesBox], List[int]]:
    """Convert the box from camera to global coordinate.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        attrs (np.ndarray): Predicted attributes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
        camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], List[int]]:
        List of standard NuScenesBoxes in the global coordinate and
        attribute label.
    """
    # Calibration matrices, their rotation quaternions and the class
    # range map do not vary per box; compute them once outside the loop.
    cam2ego = np.array(info['images'][camera_type]['cam2ego'])
    cam2ego_rot = pyquaternion.Quaternion(
        matrix=cam2ego, rtol=1e-05, atol=1e-07)
    ego2global = np.array(info['ego2global'])
    ego2global_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07)
    cls_range_map = eval_configs.class_range

    box_list = []
    attr_list = []
    for (box, attr) in zip(boxes, attrs):
        # Move box to ego vehicle coord system
        box.rotate(cam2ego_rot)
        box.translate(cam2ego[:3, 3])
        # Filter detections beyond the per-class evaluation range,
        # measured as ground-plane distance in the ego frame.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(ego2global_rot)
        box.translate(ego2global[:3, 3])
        box_list.append(box)
        attr_list.append(attr)
    return box_list, attr_list


def global_nusc_box_to_cam(info: dict,
                           boxes: List[NuScenesBox],
                           classes: List[str],
                           eval_configs: DetectionConfig,
                           camera_type: str = 'CAM_FRONT'
                           ) -> List[NuScenesBox]:
    """Convert the box from global to camera coordinate.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
        camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.

    Returns:
        List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in
        camera coordinate.
    """
    # Calibration matrices, their inverse rotations and the class range
    # map are the same for every box of this sample; build them once.
    ego2global = np.array(info['ego2global'])
    global2ego_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07).inverse
    cam2ego = np.array(info['images'][camera_type]['cam2ego'])
    ego2cam_rot = pyquaternion.Quaternion(
        matrix=cam2ego, rtol=1e-05, atol=1e-07).inverse
    cls_range_map = eval_configs.class_range

    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system (inverse of ego2global:
        # undo the translation first, then the rotation).
        box.translate(-ego2global[:3, 3])
        box.rotate(global2ego_rot)
        # Filter detections beyond the per-class evaluation range,
        # measured as ground-plane distance in the ego frame.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to camera coord system (inverse of cam2ego).
        box.translate(-cam2ego[:3, 3])
        box.rotate(ego2cam_rot)
        box_list.append(box)
    return box_list


763
764
765
def nusc_box_to_cam_box3d(
    boxes: List[NuScenesBox],
    num_classes: int = 10
) -> Tuple[CameraInstance3DBoxes, torch.Tensor, torch.Tensor]:
    """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`.

    Args:
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        num_classes (int): Number of detection classes, used to size the
            per-class score matrix for multi-class NMS. Defaults to 10
            (the nuScenes detection classes).

    Returns:
        Tuple[:obj:`CameraInstance3DBoxes`, torch.Tensor, torch.Tensor]:
        Converted 3D bounding boxes, scores and labels.
    """
    locs = torch.Tensor([b.center for b in boxes]).view(-1, 3)
    dims = torch.Tensor([b.wlh for b in boxes]).view(-1, 3)
    rots = torch.Tensor([b.orientation.yaw_pitch_roll[0]
                         for b in boxes]).view(-1, 1)
    velocity = torch.Tensor([b.velocity[0::2] for b in boxes]).view(-1, 2)

    # Convert nusbox to cambox convention: dims go from (w, l, h) to
    # (l, h, w) and the yaw sign is flipped between the two conventions.
    dims[:, [0, 1, 2]] = dims[:, [1, 2, 0]]
    rots = -rots

    boxes_3d = torch.cat([locs, dims, rots, velocity], dim=1).cuda()
    cam_boxes3d = CameraInstance3DBoxes(
        boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5))
    scores = torch.Tensor([b.score for b in boxes]).cuda()
    labels = torch.LongTensor([b.label for b in boxes]).cuda()
    # Scatter each box's score into a (num_boxes, num_classes + 1) matrix;
    # the extra column is the background slot expected by multi-class NMS.
    nms_scores = scores.new_zeros(scores.shape[0], num_classes + 1)
    indices = torch.arange(scores.shape[0], device=labels.device)
    nms_scores[indices, labels] = scores
    return cam_boxes3d, nms_scores, labels