nuscenes_metric.py 31.6 KB
Newer Older
VVsssssk's avatar
VVsssssk committed
1
2
3
4
5
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union

6
import mmengine
VVsssssk's avatar
VVsssssk committed
7
8
import numpy as np
import pyquaternion
ZCMax's avatar
ZCMax committed
9
import torch
10
from mmengine import Config, load
VVsssssk's avatar
VVsssssk committed
11
12
13
14
15
16
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.data_classes import DetectionConfig
from nuscenes.utils.data_classes import Box as NuScenesBox

zhangshilong's avatar
zhangshilong committed
17
from mmdet3d.models.layers import box3d_multiclass_nms
VVsssssk's avatar
VVsssssk committed
18
from mmdet3d.registry import METRICS
zhangshilong's avatar
zhangshilong committed
19
20
from mmdet3d.structures import (CameraInstance3DBoxes, LiDARInstance3DBoxes,
                                bbox3d2result, xywhr2xyxyr)
VVsssssk's avatar
VVsssssk committed
21
22
23
24
25
26
27
28
29


@METRICS.register_module()
class NuScenesMetric(BaseMetric):
    """Nuscenes evaluation metric.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'.
        modality (dict): Modality to specify the sensor data used as input.
            Defaults to dict(use_camera=False, use_lidar=True).
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix will
            be used instead. Defaults to None.
        format_only (bool): Format the output results without performing
            evaluation. It is useful when you want to format the result to a
            specific format and submit it to the test server.
            Defaults to False.
        jsonfile_prefix (str, optional): The prefix of json files including the
            file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Defaults to None.
        eval_version (str): Configuration version of evaluation.
            Defaults to 'detection_cvpr_2019'.
        collect_device (str): Device name used for collecting results from
            different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        backend_args (dict, optional): Arguments to instantiate the
            corresponding backend. Defaults to None.
    """
    # Maps raw nuScenes category names to the 10 detection class names used
    # by the detection benchmark.
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Fallback attribute per detection class, used when the predicted
    # attribute is absent or inconsistent with the class.
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    # Maps devkit TP-error keys to the abbreviated metric names reported
    # in the nuScenes paper/leaderboard.
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }

89
90
91
92
93
94
95
96
97
98
99
    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 metric: Union[str, List[str]] = 'bbox',
                 modality: Optional[dict] = None,
                 prefix: Optional[str] = None,
                 format_only: bool = False,
                 jsonfile_prefix: Optional[str] = None,
                 eval_version: str = 'detection_cvpr_2019',
                 collect_device: str = 'cpu',
                 backend_args: Optional[dict] = None) -> None:
        # Arguments are documented in the class docstring.
        self.default_prefix = 'NuScenes metric'
        super(NuScenesMetric, self).__init__(
            collect_device=collect_device, prefix=prefix)
        # ``modality`` defaults to None instead of a mutable default dict:
        # a shared default dict would be stored on every instance via
        # ``self.modality`` and could be mutated across instances.
        if modality is None:
            modality = dict(
                use_camera=False,
                use_lidar=True,
            )
        self.ann_file = ann_file
        self.data_root = data_root
        self.modality = modality
        self.format_only = format_only
        if self.format_only:
            # The message must be a single parenthesized expression: the
            # previous code left the continuation strings as separate no-op
            # statements, truncating the assert message.
            assert jsonfile_prefix is not None, (
                'jsonfile_prefix must be not None when format_only is True, '
                'otherwise the result files will be saved to a temp directory '
                'which will be cleanup at the end.')

        self.jsonfile_prefix = jsonfile_prefix
        self.backend_args = backend_args

        # Normalize to a list so ``compute_metrics`` can always iterate.
        self.metrics = metric if isinstance(metric, list) else [metric]

        self.eval_version = eval_version
        self.eval_detection_configs = config_factory(self.eval_version)

125
    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of outputs from the model.
        """
        for data_sample in data_samples:
            record = dict()
            # Detach both 3D and 2D predictions to CPU so results can be
            # collected across ranks and kept without holding GPU memory.
            for pred_key in ('pred_instances_3d', 'pred_instances'):
                instances = data_sample[pred_key]
                for field_name in instances:
                    instances[field_name] = instances[field_name].to('cpu')
                record[pred_key] = instances
            record['sample_idx'] = data_sample['sample_idx']
            self.results.append(record)
VVsssssk's avatar
VVsssssk committed
148

149
    def compute_metrics(self, results: List[dict]) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (List[dict]): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()

        class_names = self.dataset_meta['classes']
        self.version = self.dataset_meta['version']
        # Load the raw annotation infos once; the formatting helpers index
        # into ``self.data_infos`` by sample idx.
        self.data_infos = load(
            self.ann_file, backend_args=self.backend_args)['data_list']
        result_dict, tmp_dir = self.format_results(results, class_names,
                                                   self.jsonfile_prefix)

        metric_dict: Dict[str, float] = {}

        if self.format_only:
            # Submission mode: json files were written, nothing to evaluate.
            logger.info(
                f'results are saved in {osp.basename(self.jsonfile_prefix)}')
            return metric_dict

        for metric_name in self.metrics:
            metric_dict.update(
                self.nus_evaluate(
                    result_dict,
                    classes=class_names,
                    metric=metric_name,
                    logger=logger))

        # Remove the temporary directory only after evaluation consumed the
        # json files inside it.
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return metric_dict

    def nus_evaluate(self,
                     result_dict: dict,
                     metric: str = 'bbox',
                     classes: Optional[List[str]] = None,
                     logger: Optional[MMLogger] = None) -> Dict[str, float]:
        """Evaluation in Nuscenes protocol.

        Args:
            result_dict (dict): Formatted results of the dataset.
            metric (str): Metrics to be evaluated. Defaults to 'bbox'.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            logger (MMLogger, optional): Logger used for printing related
                information during evaluation. Defaults to None.

        Returns:
            Dict[str, float]: Results of each evaluation metric.
        """
        metric_dict: Dict[str, float] = {}
        # One json result file per prediction head (e.g. 'pred_instances_3d').
        for name, result_path in result_dict.items():
            print(f'Evaluating bboxes of {name}')
            metric_dict.update(
                self._evaluate_single(
                    result_path, classes=classes, result_name=name))
        return metric_dict

212
213
214
215
216
    def _evaluate_single(
            self,
            result_path: str,
            classes: Optional[List[str]] = None,
            result_name: str = 'pred_instances_3d') -> Dict[str, float]:
        """Evaluation for a single model in nuScenes protocol.

        Runs the official ``NuScenesEval`` on the json file at
        ``result_path`` and flattens the resulting summary into a flat
        ``{metric_prefix/key: value}`` dict.

        Args:
            result_path (str): Path of the result file.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            result_name (str): Result name in the metric prefix.
                Defaults to 'pred_instances_3d'.

        Returns:
            Dict[str, float]: Dictionary of evaluation details.
        """
        # Imported here, next to their only use in this class.
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        # Write the devkit's output next to the result json file.
        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root, verbose=False)
        # NOTE(review): only mini/trainval are mapped; a 'v1.0-test' version
        # would raise KeyError below — confirm intended.
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=False)

        # record metrics: read back the summary the devkit just wrote.
        metrics = mmengine.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in classes:
            # Per-class AP at each matching distance threshold.
            for k, v in metrics['label_aps'][name].items():
                val = float(f'{v:.4f}')  # round to 4 decimals via formatting
                detail[f'{metric_prefix}/{name}_AP_dist_{k}'] = val
            # Per-class true-positive error metrics.
            for k, v in metrics['label_tp_errors'][name].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{name}_{k}'] = val
            # NOTE(review): this loop is class-independent but sits inside the
            # per-class loop, rewriting the same keys each iteration — the
            # result is correct but the work is redundant.
            for k, v in metrics['tp_errors'].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{self.ErrNameMapping[k]}'] = val

        detail[f'{metric_prefix}/NDS'] = metrics['nd_score']
        detail[f'{metric_prefix}/mAP'] = metrics['mean_ap']
        return detail

267
268
269
270
271
272
    def format_results(
        self,
        results: List[dict],
        classes: Optional[List[str]] = None,
        jsonfile_prefix: Optional[str] = None
    ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]:
        """Format the mmdet3d results to standard NuScenes json file.

        Args:
            results (List[dict]): Testing results of the dataset.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of json files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Defaults to None.

        Returns:
            tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a
            dict containing the json filepaths, ``tmp_dir`` is the temporal
            directory created for saving json files when ``jsonfile_prefix`` is
            not specified.
        """
        assert isinstance(results, list), 'results must be a list'

        tmp_dir = None
        if jsonfile_prefix is None:
            # Caller gave no destination: write into a temp dir the caller
            # is responsible for cleaning up.
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')

        result_dict = dict()
        sample_idx_list = [res['sample_idx'] for res in results]

        # Format every public 3D prediction head found in the results.
        for name in results[0]:
            if 'pred' not in name or '3d' not in name or name.startswith('_'):
                continue
            print(f'\nFormating bboxes of {name}')
            per_name_results = [res[name] for res in results]
            out_prefix = osp.join(jsonfile_prefix, name)
            # Dispatch on the concrete 3D box type of the predictions.
            box_type_3d = type(per_name_results[0]['bboxes_3d'])
            if box_type_3d == LiDARInstance3DBoxes:
                result_dict[name] = self._format_lidar_bbox(
                    per_name_results, sample_idx_list, classes, out_prefix)
            elif box_type_3d == CameraInstance3DBoxes:
                result_dict[name] = self._format_camera_bbox(
                    per_name_results, sample_idx_list, classes, out_prefix)

        return result_dict, tmp_dir

315
    def get_attr_name(self, attr_idx: int, label_name: str) -> str:
        """Get attribute from predicted index.

        This is a workaround to predict attribute when the predicted velocity
        is not reliable. We map the predicted attribute index to the one in the
        attribute set. If it is consistent with the category, we will keep it.
        Otherwise, we will use the default attribute.

        Args:
            attr_idx (int): Attribute index.
            label_name (str): Predicted category name.

        Returns:
            str: Predicted attribute name.
        """
        # TODO: Simplify the variable name
        AttrMapping_rev2 = [
            'cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving',
            'pedestrian.standing', 'pedestrian.sitting_lying_down',
            'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None'
        ]
        predicted_attr = AttrMapping_rev2[attr_idx]
        # Attributes that are consistent with each group of categories; a
        # prediction outside its category's set falls back to the default.
        compatible_attrs = {
            ('car', 'bus', 'truck', 'trailer', 'construction_vehicle'):
            ('vehicle.moving', 'vehicle.parked', 'vehicle.stopped'),
            ('pedestrian', ):
            ('pedestrian.moving', 'pedestrian.standing',
             'pedestrian.sitting_lying_down'),
            ('bicycle', 'motorcycle'):
            ('cycle.with_rider', 'cycle.without_rider'),
        }
        for category_group, valid_attrs in compatible_attrs.items():
            if label_name in category_group:
                if predicted_attr in valid_attrs:
                    return predicted_attr
                break
        # Categories without attributes (e.g. barrier, traffic_cone) and
        # inconsistent predictions use the per-class default.
        return self.DefaultAttribute[label_name]

ZCMax's avatar
ZCMax committed
362
363
    def _format_camera_bbox(self,
                            results: List[dict],
                            sample_idx_list: List[int],
                            classes: Optional[List[str]] = None,
                            jsonfile_prefix: Optional[str] = None) -> str:
        """Convert the results to the standard format.

        Merges the per-image predictions of each frame (6 cameras), runs a
        3D NMS across the merged boxes, and writes a nuScenes submission
        json file.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by modifying the
                jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')

        # Camera types in Nuscenes datasets
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]

        CAM_NUM = 6

        for i, det in enumerate(mmengine.track_iter_progress(results)):

            sample_idx = sample_idx_list[i]

            # assumes sample_idx encodes frame * CAM_NUM + camera and that
            # results arrive grouped per frame in camera order — TODO confirm
            # against the dataset's sample indexing.
            frame_sample_idx = sample_idx // CAM_NUM
            camera_type_id = sample_idx % CAM_NUM

            # First camera of a frame: reset the per-frame accumulators.
            if camera_type_id == 0:
                boxes_per_frame = []
                attrs_per_frame = []

            # need to merge results from images of the same sample
            annos = []
            boxes, attrs = output_to_nusc_box(det)
            sample_token = self.data_infos[frame_sample_idx]['token']
            camera_type = camera_types[camera_type_id]
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs, camera_type)
            boxes_per_frame.extend(boxes)
            attrs_per_frame.extend(attrs)
            # Remove redundant predictions caused by overlap of images
            # (only run the merge/NMS once all CAM_NUM images of the frame
            # have been accumulated).
            if (sample_idx + 1) % CAM_NUM != 0:
                continue
            # Bring the accumulated global boxes back to the camera frame
            # so they can be expressed as CameraInstance3DBoxes for NMS.
            boxes = global_nusc_box_to_cam(self.data_infos[frame_sample_idx],
                                           boxes_per_frame, classes,
                                           self.eval_detection_configs)
            cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes)
            # box nms 3d over 6 images in a frame
            # TODO: move this global setting into config
            nms_cfg = dict(
                use_rotate_nms=True,
                nms_across_levels=False,
                nms_pre=4096,
                nms_thr=0.05,
                score_thr=0.01,
                min_bbox_size=0,
                max_per_frame=500)
            nms_cfg = Config(nms_cfg)
            cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev)
            boxes3d = cam_boxes3d.tensor
            # generate attr scores from attr labels
            attrs = labels.new_tensor([attr for attr in attrs_per_frame])
            boxes3d, scores, labels, attrs = box3d_multiclass_nms(
                boxes3d,
                cam_boxes3d_for_nms,
                scores,
                nms_cfg.score_thr,
                nms_cfg.max_per_frame,
                nms_cfg,
                mlvl_attr_scores=attrs)
            cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9)
            det = bbox3d2result(cam_boxes3d, scores, labels, attrs)
            boxes, attrs = output_to_nusc_box(det)
            # Convert the NMS survivors to the global frame for submission.
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs)

            # NOTE(review): this inner ``i`` shadows the outer loop index; it
            # is safe only because the outer index is re-bound by enumerate
            # before its next use.
            for i, box in enumerate(boxes):
                name = classes[box.label]
                attr = self.get_attr_name(attrs[i], name)
                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            # other views results of the same frame should be concatenated
            if sample_token in nusc_annos:
                nusc_annos[sample_token].extend(annos)
            else:
                nusc_annos[sample_token] = annos

        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path

    def _format_lidar_bbox(self,
                           results: List[dict],
                           sample_idx_list: List[int],
                           classes: Optional[List[str]] = None,
                           jsonfile_prefix: Optional[str] = None) -> str:
        """Convert the results to the standard format.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by modifying the
                jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')
        for frame_idx, det in enumerate(mmengine.track_iter_progress(results)):
            boxes, _ = output_to_nusc_box(det)
            sample_idx = sample_idx_list[frame_idx]
            sample_token = self.data_infos[sample_idx]['token']
            boxes = lidar_nusc_box_to_global(self.data_infos[sample_idx],
                                             boxes, classes,
                                             self.eval_detection_configs)
            annos = []
            for box in boxes:
                name = classes[box.label]
                speed = np.sqrt(box.velocity[0]**2 + box.velocity[1]**2)
                if speed > 0.2:
                    # A clearly moving box gets a motion attribute where the
                    # class allows one.
                    if name in ('car', 'construction_vehicle', 'bus', 'truck',
                                'trailer'):
                        attr = 'vehicle.moving'
                    elif name in ('bicycle', 'motorcycle'):
                        attr = 'cycle.with_rider'
                    else:
                        attr = self.DefaultAttribute[name]
                elif name == 'pedestrian':
                    attr = 'pedestrian.standing'
                elif name == 'bus':
                    attr = 'vehicle.stopped'
                else:
                    attr = self.DefaultAttribute[name]

                annos.append(
                    dict(
                        sample_token=sample_token,
                        translation=box.center.tolist(),
                        size=box.wlh.tolist(),
                        rotation=box.orientation.elements.tolist(),
                        velocity=box.velocity[:2].tolist(),
                        detection_name=name,
                        detection_score=box.score,
                        attribute_name=attr))
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }
        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path


560
561
def output_to_nusc_box(
        detection: dict) -> Tuple[List[NuScenesBox], Union[np.ndarray, None]]:
    """Convert the output to the box class in the nuScenes.

    Args:
        detection (dict): Detection results.

            - bboxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]: List of standard
        NuScenesBoxes and attribute labels.
    """
    bbox3d = detection['bboxes_3d']
    scores = detection['scores_3d'].numpy()
    labels = detection['labels_3d'].numpy()
    # Attribute labels are optional; ``None`` is propagated to the caller.
    attrs = None
    if 'attr_labels' in detection:
        attrs = detection['attr_labels'].numpy()

    box_gravity_center = bbox3d.gravity_center.numpy()
    box_dims = bbox3d.dims.numpy()
    box_yaw = bbox3d.yaw.numpy()

    box_list = []

    if isinstance(bbox3d, LiDARInstance3DBoxes):
        # our LiDAR coordinate system -> nuScenes box coordinate system
        # (swap the first two dims: (l, w, h) order differs between the two).
        nus_box_dims = box_dims[:, [1, 0, 2]]
        for i in range(len(bbox3d)):
            quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
            # assumes tensor columns 7:9 hold (vx, vy) — TODO confirm for
            # boxes encoded without velocity (box_dim < 9).
            velocity = (*bbox3d.tensor[i, 7:9], 0.0)
            box = NuScenesBox(
                box_gravity_center[i],
                nus_box_dims[i],
                quat,
                label=labels[i],
                score=scores[i],
                velocity=velocity)
            box_list.append(box)
    elif isinstance(bbox3d, CameraInstance3DBoxes):
        # our Camera coordinate system -> nuScenes box coordinate system
        # convert the dim/rot to nuscbox convention
        nus_box_dims = box_dims[:, [2, 0, 1]]
        nus_box_yaw = -box_yaw
        for i in range(len(bbox3d)):
            # Yaw about z, then a +90deg rotation about x to move from the
            # camera convention to the nuScenes box convention.
            q1 = pyquaternion.Quaternion(
                axis=[0, 0, 1], radians=nus_box_yaw[i])
            q2 = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2)
            quat = q2 * q1
            # Camera boxes store velocity as (vx, vz) in columns 7 and 8;
            # the vertical (camera-y) component is set to zero.
            velocity = (bbox3d.tensor[i, 7], 0.0, bbox3d.tensor[i, 8])
            box = NuScenesBox(
                box_gravity_center[i],
                nus_box_dims[i],
                quat,
                label=labels[i],
                score=scores[i],
                velocity=velocity)
            box_list.append(box)
    else:
        raise NotImplementedError(
            f'Do not support convert {type(bbox3d)} bboxes '
            'to standard NuScenesBoxes.')

    return box_list, attrs
VVsssssk's avatar
VVsssssk committed
631
632
633
634
635
636
637
638


def lidar_nusc_box_to_global(
        info: dict, boxes: List[NuScenesBox], classes: List[str],
        eval_configs: DetectionConfig) -> List[NuScenesBox]:
    """Convert the box from ego to global coordinate.

    Args:
        info (dict): Info for a specific sample data, including the calibration
            information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.

    Returns:
        List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in the
        global coordinate.
    """
    # Both poses are fixed for this sample, so build the matrices and
    # rotation quaternions once instead of once per box.
    lidar2ego = np.array(info['lidar_points']['lidar2ego'])
    lidar2ego_rot = pyquaternion.Quaternion(
        matrix=lidar2ego, rtol=1e-05, atol=1e-07)
    ego2global = np.array(info['ego2global'])
    ego2global_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07)
    cls_range_map = eval_configs.class_range

    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.rotate(lidar2ego_rot)
        box.translate(lidar2ego[:3, 3])
        # Drop detections whose BEV distance from the ego vehicle exceeds
        # the per-class evaluation range.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(ego2global_rot)
        box.translate(ego2global[:3, 3])
        box_list.append(box)
    return box_list
ZCMax's avatar
ZCMax committed
669
670


ChaimZhu's avatar
ChaimZhu committed
671
672
673
def cam_nusc_box_to_global(
    info: dict,
    boxes: List[NuScenesBox],
    attrs: np.ndarray,
    classes: List[str],
    eval_configs: DetectionConfig,
    camera_type: str = 'CAM_FRONT',
) -> Tuple[List[NuScenesBox], List[int]]:
    """Convert the box from camera to global coordinate.

    Args:
        info (dict): Info for a specific sample data, including the calibration
            information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        attrs (np.ndarray): Predicted attributes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
        camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], List[int]]: List of standard
        NuScenesBoxes in the global coordinate and attribute label.
    """
    # The camera and ego poses are fixed for this sample, so build the
    # matrices and rotation quaternions once instead of once per box.
    cam2ego = np.array(info['images'][camera_type]['cam2ego'])
    cam2ego_rot = pyquaternion.Quaternion(
        matrix=cam2ego, rtol=1e-05, atol=1e-07)
    ego2global = np.array(info['ego2global'])
    ego2global_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07)
    cls_range_map = eval_configs.class_range

    box_list = []
    attr_list = []
    for (box, attr) in zip(boxes, attrs):
        # Move box to ego vehicle coord system
        box.rotate(cam2ego_rot)
        box.translate(cam2ego[:3, 3])
        # Drop detections whose BEV distance from the ego vehicle exceeds
        # the per-class evaluation range.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(ego2global_rot)
        box.translate(ego2global[:3, 3])
        box_list.append(box)
        attr_list.append(attr)
    return box_list, attr_list


def global_nusc_box_to_cam(info: dict,
                           boxes: List[NuScenesBox],
                           classes: List[str],
                           eval_configs: DetectionConfig,
                           camera_type: str = 'CAM_FRONT') -> List[NuScenesBox]:
    """Convert the box from global to camera coordinate.

    Args:
        info (dict): Info for a specific sample data, including the calibration
            information.
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
        camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.

    Returns:
        List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in camera
        coordinate.
    """
    # Both poses are fixed for this sample, so build the inverse rotation
    # quaternions once instead of once per box.
    ego2global = np.array(info['ego2global'])
    global2ego_rot = pyquaternion.Quaternion(
        matrix=ego2global, rtol=1e-05, atol=1e-07).inverse
    cam2ego = np.array(info['images'][camera_type]['cam2ego'])
    ego2cam_rot = pyquaternion.Quaternion(
        matrix=cam2ego, rtol=1e-05, atol=1e-07).inverse
    cls_range_map = eval_configs.class_range

    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.translate(-ego2global[:3, 3])
        box.rotate(global2ego_rot)
        # Drop detections whose BEV distance from the ego vehicle exceeds
        # the per-class evaluation range.
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to camera coord system
        box.translate(-cam2ego[:3, 3])
        box.rotate(ego2cam_rot)
        box_list.append(box)
    return box_list


758
759
760
def nusc_box_to_cam_box3d(
    boxes: List[NuScenesBox],
    num_classes: int = 10
) -> Tuple[CameraInstance3DBoxes, torch.Tensor, torch.Tensor]:
    """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`.

    Args:
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        num_classes (int): Number of detection classes, used to size the
            per-class score matrix (one extra column is appended, presumably
            a background slot for multi-class NMS — confirm against
            ``box3d_multiclass_nms``). Defaults to 10, the nuScenes
            detection class count.

    Returns:
        Tuple[:obj:`CameraInstance3DBoxes`, torch.Tensor, torch.Tensor]:
        Converted 3D bounding boxes, per-class scores of shape
        (N, num_classes + 1) and labels, all on the CUDA device.
    """
    locs = torch.Tensor([b.center for b in boxes]).view(-1, 3)
    dims = torch.Tensor([b.wlh for b in boxes]).view(-1, 3)
    rots = torch.Tensor([b.orientation.yaw_pitch_roll[0]
                         for b in boxes]).view(-1, 1)
    velocity = torch.Tensor([b.velocity[0::2] for b in boxes]).view(-1, 2)

    # convert nusbox (w, l, h) dims to cambox convention and flip yaw sign
    dims[:, [0, 1, 2]] = dims[:, [1, 2, 0]]
    rots = -rots

    # NOTE(review): hard-codes CUDA placement; callers relying on GPU
    # tensors keep working, but this will fail on CPU-only hosts.
    boxes_3d = torch.cat([locs, dims, rots, velocity], dim=1).cuda()
    cam_boxes3d = CameraInstance3DBoxes(
        boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5))
    scores = torch.Tensor([b.score for b in boxes]).cuda()
    labels = torch.LongTensor([b.label for b in boxes]).cuda()
    # Scatter each box's score into the column of its label; the trailing
    # extra column stays zero.
    nms_scores = scores.new_zeros(scores.shape[0], num_classes + 1)
    indices = torch.arange(scores.shape[0], device=labels.device)
    nms_scores[indices, labels] = scores
    return cam_boxes3d, nms_scores, labels