"test/srt/test_request_length_validation.py" did not exist on "0427416b59d11958a63f2ed344af3c5141d8e835"
nuscenes_metric.py 31.7 KB
Newer Older
VVsssssk's avatar
VVsssssk committed
1
2
3
4
5
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union

6
import mmengine
VVsssssk's avatar
VVsssssk committed
7
8
import numpy as np
import pyquaternion
ZCMax's avatar
ZCMax committed
9
import torch
10
from mmengine import Config, load
VVsssssk's avatar
VVsssssk committed
11
12
13
14
15
16
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.data_classes import DetectionConfig
from nuscenes.utils.data_classes import Box as NuScenesBox

zhangshilong's avatar
zhangshilong committed
17
from mmdet3d.models.layers import box3d_multiclass_nms
VVsssssk's avatar
VVsssssk committed
18
from mmdet3d.registry import METRICS
zhangshilong's avatar
zhangshilong committed
19
20
from mmdet3d.structures import (CameraInstance3DBoxes, LiDARInstance3DBoxes,
                                bbox3d2result, xywhr2xyxyr)
VVsssssk's avatar
VVsssssk committed
21
22
23
24
25
26
27
28
29


@METRICS.register_module()
class NuScenesMetric(BaseMetric):
    """Nuscenes evaluation metric.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        metric (str or List[str]): Metrics to be evaluated.
            Defaults to 'bbox'.
        modality (dict): Modality to specify the sensor data used
            as input. Defaults to dict(use_camera=False, use_lidar=True).
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
        format_only (bool): Format the output results without perform
            evaluation. It is useful when you want to format the result
            to a specific format and submit it to the test server.
            Defaults to False.
        jsonfile_prefix (str, optional): The prefix of json files including
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Defaults to None.
        eval_version (str): Configuration version of evaluation.
            Defaults to 'detection_cvpr_2019'.
        collect_device (str): Device name used for collecting results
            from different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmengine.fileio.FileClient` for details.
            Defaults to dict(backend='disk').
    """
    # Mapping from raw nuScenes annotation category names to the 10
    # detection classes used by the nuScenes detection benchmark.
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Fallback attribute per detection class, used when a predicted
    # attribute is absent or inconsistent with the predicted class.
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # Short display names for the true-positive error metrics.
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }

91
92
93
94
95
    def __init__(
        self,
        data_root: str,
        ann_file: str,
        metric: Union[str, List[str]] = 'bbox',
        modality: Optional[dict] = None,
        prefix: Optional[str] = None,
        format_only: bool = False,
        jsonfile_prefix: Optional[str] = None,
        eval_version: str = 'detection_cvpr_2019',
        collect_device: str = 'cpu',
        file_client_args: Optional[dict] = None
    ) -> None:
        """Initialize the metric. See the class docstring for the arguments.

        Raises:
            AssertionError: If ``format_only`` is True but ``jsonfile_prefix``
                is not provided.
        """
        self.default_prefix = 'NuScenes metric'
        super(NuScenesMetric, self).__init__(
            collect_device=collect_device, prefix=prefix)
        # Build default dicts here instead of using mutable default
        # arguments, so instances never share (and possibly mutate) the
        # same dict object.
        if modality is None:
            modality = dict(
                use_camera=False,
                use_lidar=True,
            )
        if file_client_args is None:
            file_client_args = dict(backend='disk')
        self.ann_file = ann_file
        self.data_root = data_root
        self.modality = modality
        self.format_only = format_only
        if self.format_only:
            # The message is parenthesized so it is actually part of the
            # assert; adjacent bare string statements on following lines
            # would be no-ops and the message would never be shown.
            assert jsonfile_prefix is not None, (
                'jsonfile_prefix must be not None when format_only is True, '
                'otherwise the result files will be saved to a temp directory '
                'which will be cleanup at the end.')

        self.jsonfile_prefix = jsonfile_prefix
        self.file_client_args = file_client_args

        # Normalize to a list so compute_metrics can always iterate.
        self.metrics = metric if isinstance(metric, list) else [metric]

        self.eval_version = eval_version
        self.eval_detection_configs = config_factory(self.eval_version)

130
    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results should be stored in ``self.results``,
        which will be used to compute the metrics when all batches
        have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of outputs from
                the model.
        """
        for sample in data_samples:
            record = dict()
            # Move both 2D and 3D predicted instances to CPU so that the
            # collected results are device-independent.
            for key in ('pred_instances_3d', 'pred_instances'):
                instances = sample[key]
                for field in instances:
                    instances[field] = instances[field].to('cpu')
                record[key] = instances
            record['sample_idx'] = sample['sample_idx']
            self.results.append(record)
VVsssssk's avatar
VVsssssk committed
155

156
    def compute_metrics(self, results: List[dict]) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (List[dict]): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()

        classes = self.dataset_meta['classes']
        self.version = self.dataset_meta['version']
        # Load annotations once; they map sample indices to nuScenes tokens.
        self.data_infos = load(
            self.ann_file, file_client_args=self.file_client_args)['data_list']

        result_dict, tmp_dir = self.format_results(results, classes,
                                                   self.jsonfile_prefix)

        metric_dict: Dict[str, float] = {}

        # In format-only mode the formatted files are the product; skip
        # evaluation entirely.
        if self.format_only:
            logger.info('results are saved in '
                        f'{osp.basename(self.jsonfile_prefix)}')
            return metric_dict

        for metric in self.metrics:
            ap_dict = self.nus_evaluate(
                result_dict, classes=classes, metric=metric, logger=logger)
            metric_dict.update(ap_dict)

        if tmp_dir is not None:
            tmp_dir.cleanup()
        return metric_dict

    def nus_evaluate(self,
                     result_dict: dict,
                     metric: str = 'bbox',
                     classes: Optional[List[str]] = None,
                     logger: Optional[MMLogger] = None) -> Dict[str, float]:
        """Evaluation in Nuscenes protocol.

        Args:
            result_dict (dict): Formatted results of the dataset.
            metric (str): Metrics to be evaluated. Defaults to 'bbox'.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            logger (MMLogger, optional): Logger used for printing
                related information during evaluation. Defaults to None.

        Returns:
            Dict[str, float]: Results of each evaluation metric.
        """
        metric_dict = dict()
        for name in result_dict:
            print(f'Evaluating bboxes of {name}')
            ret_dict = self._evaluate_single(
                result_dict[name], classes=classes, result_name=name)
            # Merge inside the loop: previously only the last iteration's
            # results survived, and an empty ``result_dict`` raised a
            # NameError on the undefined ``ret_dict``.
            metric_dict.update(ret_dict)
        return metric_dict

219
220
221
222
223
    def _evaluate_single(
            self,
            result_path: str,
            classes: Optional[List[str]] = None,
            result_name: str = 'pred_instances_3d') -> Dict[str, float]:
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            result_name (str): Result name in the metric prefix.
                Defaults to 'pred_instances_3d'.

        Returns:
            Dict[str, float]: Dictionary of evaluation details.
        """
        # Local imports keep the heavyweight devkit classes out of module
        # import time.
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        # The result json is expected to live in the directory used as the
        # devkit's output dir (it writes metrics_summary.json next to it).
        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root, verbose=False)
        # Only the mini and trainval splits are supported here; other
        # versions will raise a KeyError below.
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmengine.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in classes:
            # Per-class AP at each matching distance threshold.
            for k, v in metrics['label_aps'][name].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{name}_AP_dist_{k}'] = val
            # Per-class true-positive errors (ATE/ASE/AOE/...).
            for k, v in metrics['label_tp_errors'][name].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{name}_{k}'] = val
            # NOTE(review): 'tp_errors' is class-independent, so this inner
            # loop rewrites the same keys on every class iteration; harmless
            # but redundant — confirm before restructuring.
            for k, v in metrics['tp_errors'].items():
                val = float(f'{v:.4f}')
                detail[f'{metric_prefix}/{self.ErrNameMapping[k]}'] = val

        detail[f'{metric_prefix}/NDS'] = metrics['nd_score']
        detail[f'{metric_prefix}/mAP'] = metrics['mean_ap']
        return detail

274
275
276
277
278
279
    def format_results(
        self,
        results: List[dict],
        classes: Optional[List[str]] = None,
        jsonfile_prefix: Optional[str] = None
    ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]:
        """Format the mmdet3d results to standard NuScenes json file.

        Args:
            results (List[dict]): Testing results of the dataset.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of json files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Defaults to None.

        Returns:
            tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a
            dict containing the json filepaths, ``tmp_dir`` is the temporal
            directory created for saving json files when
            ``jsonfile_prefix`` is not specified.
        """
        assert isinstance(results, list), 'results must be a list'

        tmp_dir = None
        if jsonfile_prefix is None:
            # No destination given: write to a managed temp dir; the caller
            # owns its cleanup via the returned handle.
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')

        result_dict = dict()
        sample_idx_list = [res['sample_idx'] for res in results]

        for key in results[0]:
            # Only public 3D prediction entries (e.g. 'pred_instances_3d')
            # are formatted.
            if 'pred' not in key or '3d' not in key or key[0] == '_':
                continue
            print(f'\nFormating bboxes of {key}')
            preds = [out[key] for out in results]
            out_prefix = osp.join(jsonfile_prefix, key)
            # Dispatch on the concrete box type of the first prediction.
            box_type_3d = type(preds[0]['bboxes_3d'])
            if box_type_3d == LiDARInstance3DBoxes:
                result_dict[key] = self._format_lidar_bbox(
                    preds, sample_idx_list, classes, out_prefix)
            elif box_type_3d == CameraInstance3DBoxes:
                result_dict[key] = self._format_camera_bbox(
                    preds, sample_idx_list, classes, out_prefix)

        return result_dict, tmp_dir

322
    def get_attr_name(self, attr_idx: int, label_name: str) -> str:
        """Get attribute from predicted index.

        This is a workaround to predict attribute when the predicted velocity
        is not reliable. We map the predicted attribute index to the one
        in the attribute set. If it is consistent with the category, we will
        keep it. Otherwise, we will use the default attribute.

        Args:
            attr_idx (int): Attribute index.
            label_name (str): Predicted category name.

        Returns:
            str: Predicted attribute name.
        """
        # TODO: Simplify the variable name
        AttrMapping_rev2 = [
            'cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving',
            'pedestrian.standing', 'pedestrian.sitting_lying_down',
            'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None'
        ]
        attr = AttrMapping_rev2[attr_idx]
        # Attributes consistent with each category group; membership tests
        # replace the original long chains of `or` comparisons.
        if label_name in ('car', 'bus', 'truck', 'trailer',
                          'construction_vehicle'):
            valid_attrs = {
                'vehicle.moving', 'vehicle.parked', 'vehicle.stopped'
            }
        elif label_name == 'pedestrian':
            valid_attrs = {
                'pedestrian.moving', 'pedestrian.standing',
                'pedestrian.sitting_lying_down'
            }
        elif label_name in ('bicycle', 'motorcycle'):
            valid_attrs = {'cycle.with_rider', 'cycle.without_rider'}
        else:
            valid_attrs = set()
        # Keep the prediction only when it is consistent with the category;
        # otherwise fall back to the class default.
        if attr in valid_attrs:
            return attr
        return self.DefaultAttribute[label_name]

ZCMax's avatar
ZCMax committed
369
370
    def _format_camera_bbox(self,
                            results: List[dict],
                            sample_idx_list: List[int],
                            classes: Optional[List[str]] = None,
                            jsonfile_prefix: Optional[str] = None) -> str:
        """Convert the results to the standard format.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')

        # Camera types in Nuscenes datasets
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]

        CAM_NUM = 6

        # Assumes results arrive as CAM_NUM consecutive per-camera samples
        # belonging to the same frame (sample_idx encodes frame * CAM_NUM
        # + camera id) — TODO confirm against the camera dataset ordering.
        for i, det in enumerate(mmengine.track_iter_progress(results)):

            sample_idx = sample_idx_list[i]

            frame_sample_idx = sample_idx // CAM_NUM
            camera_type_id = sample_idx % CAM_NUM

            # First camera of a frame: start fresh accumulators.
            if camera_type_id == 0:
                boxes_per_frame = []
                attrs_per_frame = []

            # need to merge results from images of the same sample
            annos = []
            boxes, attrs = output_to_nusc_box(det)
            sample_token = self.data_infos[frame_sample_idx]['token']
            camera_type = camera_types[camera_type_id]
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs, camera_type)
            boxes_per_frame.extend(boxes)
            attrs_per_frame.extend(attrs)
            # Remove redundant predictions caused by overlap of images
            # Only continue past this point on the frame's last camera,
            # once all CAM_NUM views have been accumulated.
            if (sample_idx + 1) % CAM_NUM != 0:
                continue
            boxes = global_nusc_box_to_cam(self.data_infos[frame_sample_idx],
                                           boxes_per_frame, classes,
                                           self.eval_detection_configs)
            cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes)
            # box nms 3d over 6 images in a frame
            # TODO: move this global setting into config
            nms_cfg = dict(
                use_rotate_nms=True,
                nms_across_levels=False,
                nms_pre=4096,
                nms_thr=0.05,
                score_thr=0.01,
                min_bbox_size=0,
                max_per_frame=500)
            nms_cfg = Config(nms_cfg)
            cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev)
            boxes3d = cam_boxes3d.tensor
            # generate attr scores from attr labels
            attrs = labels.new_tensor([attr for attr in attrs_per_frame])
            boxes3d, scores, labels, attrs = box3d_multiclass_nms(
                boxes3d,
                cam_boxes3d_for_nms,
                scores,
                nms_cfg.score_thr,
                nms_cfg.max_per_frame,
                nms_cfg,
                mlvl_attr_scores=attrs)
            cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9)
            det = bbox3d2result(cam_boxes3d, scores, labels, attrs)
            # Convert the NMS-merged detections back to global-frame
            # NuScenesBoxes for serialization.
            boxes, attrs = output_to_nusc_box(det)
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[frame_sample_idx], boxes, attrs, classes,
                self.eval_detection_configs)

            # NOTE(review): the inner ``i`` shadows the outer enumerate
            # index; harmless since enumerate reassigns ``i`` on the next
            # iteration, but consider renaming.
            for i, box in enumerate(boxes):
                name = classes[box.label]
                attr = self.get_attr_name(attrs[i], name)
                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            # other views results of the same frame should be concatenated
            if sample_token in nusc_annos:
                nusc_annos[sample_token].extend(annos)
            else:
                nusc_annos[sample_token] = annos

        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path

    def _format_lidar_bbox(self,
                           results: List[dict],
                           sample_idx_list: List[int],
                           classes: Optional[List[str]] = None,
                           jsonfile_prefix: Optional[str] = None) -> str:
        """Convert the results to the standard format.

        Args:
            results (List[dict]): Testing results of the dataset.
            sample_idx_list (List[int]): List of result sample idx.
            classes (List[str], optional): A list of class name.
                Defaults to None.
            jsonfile_prefix (str, optional): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Defaults to None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}

        print('Start to convert detection format...')
        for det_idx, det in enumerate(mmengine.track_iter_progress(results)):
            sample_idx = sample_idx_list[det_idx]
            sample_token = self.data_infos[sample_idx]['token']
            boxes, attrs = output_to_nusc_box(det)
            boxes = lidar_nusc_box_to_global(self.data_infos[sample_idx],
                                             boxes, classes,
                                             self.eval_detection_configs)
            annos = []
            for box in boxes:
                name = classes[box.label]
                # Fast-moving boxes (planar speed > 0.2) get a motion
                # attribute; otherwise use stationary/class defaults.
                if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
                    if name in ('car', 'construction_vehicle', 'bus', 'truck',
                                'trailer'):
                        attr = 'vehicle.moving'
                    elif name in ('bicycle', 'motorcycle'):
                        attr = 'cycle.with_rider'
                    else:
                        attr = self.DefaultAttribute[name]
                elif name == 'pedestrian':
                    attr = 'pedestrian.standing'
                elif name == 'bus':
                    attr = 'vehicle.stopped'
                else:
                    attr = self.DefaultAttribute[name]

                annos.append(
                    dict(
                        sample_token=sample_token,
                        translation=box.center.tolist(),
                        size=box.wlh.tolist(),
                        rotation=box.orientation.elements.tolist(),
                        velocity=box.velocity[:2].tolist(),
                        detection_name=name,
                        detection_score=box.score,
                        attribute_name=attr))
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }
        mmengine.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print(f'Results writes to {res_path}')
        mmengine.dump(nusc_submissions, res_path)
        return res_path


567
568
def output_to_nusc_box(
        detection: dict) -> Tuple[List[NuScenesBox], Union[np.ndarray, None]]:
    """Convert the output to the box class in the nuScenes.

    Args:
        detection (dict): Detection results.

            - bboxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]:
        List of standard NuScenesBoxes and attribute labels.
    """
    bbox3d = detection['bboxes_3d']
    scores = detection['scores_3d'].numpy()
    labels = detection['labels_3d'].numpy()
    # Attribute labels are optional in the detection dict.
    attrs = detection['attr_labels'].numpy() \
        if 'attr_labels' in detection else None

    centers = bbox3d.gravity_center.numpy()
    dims = bbox3d.dims.numpy()
    yaws = bbox3d.yaw.numpy()

    box_list = []
    if isinstance(bbox3d, LiDARInstance3DBoxes):
        # our LiDAR coordinate system -> nuScenes box coordinate system
        nus_dims = dims[:, [1, 0, 2]]
        for idx in range(len(bbox3d)):
            quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=yaws[idx])
            velocity = (*bbox3d.tensor[idx, 7:9], 0.0)
            # velo_val = np.linalg.norm(box3d[i, 7:9])
            # velo_ori = box3d[i, 6]
            # velocity = (
            # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0)
            box_list.append(
                NuScenesBox(
                    centers[idx],
                    nus_dims[idx],
                    quat,
                    label=labels[idx],
                    score=scores[idx],
                    velocity=velocity))
    elif isinstance(bbox3d, CameraInstance3DBoxes):
        # our Camera coordinate system -> nuScenes box coordinate system
        # convert the dim/rot to nuscbox convention
        nus_dims = dims[:, [2, 0, 1]]
        nus_yaws = -yaws
        for idx in range(len(bbox3d)):
            yaw_quat = pyquaternion.Quaternion(
                axis=[0, 0, 1], radians=nus_yaws[idx])
            axis_quat = pyquaternion.Quaternion(
                axis=[1, 0, 0], radians=np.pi / 2)
            velocity = (bbox3d.tensor[idx, 7], 0.0, bbox3d.tensor[idx, 8])
            box_list.append(
                NuScenesBox(
                    centers[idx],
                    nus_dims[idx],
                    axis_quat * yaw_quat,
                    label=labels[idx],
                    score=scores[idx],
                    velocity=velocity))
    else:
        raise NotImplementedError(
            f'Do not support convert {type(bbox3d)} bboxes '
            'to standard NuScenesBoxes.')

    return box_list, attrs
VVsssssk's avatar
VVsssssk committed
638
639
640
641
642
643
644
645
646
647


def lidar_nusc_box_to_global(
        info: dict, boxes: List[NuScenesBox], classes: List[str],
        eval_configs: DetectionConfig) -> List[NuScenesBox]:
    """Convert boxes from the LiDAR frame to the global coordinate frame.

    Each box is first moved into the ego-vehicle frame, filtered against the
    per-class evaluation range, and finally moved into the global frame.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information (``lidar2ego`` and ``ego2global``
            transform matrices).
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration
            object providing the per-class detection range.

    Returns:
        List[:obj:`NuScenesBox`]: Standard NuScenesBoxes in the global
        coordinate frame.
    """
    # Transforms are constant for the sample; hoist them out of the loop.
    lidar2ego = np.array(info['lidar_points']['lidar2ego'])
    ego2global = np.array(info['ego2global'])
    cls_range_map = eval_configs.class_range

    kept_boxes = []
    for box in boxes:
        # LiDAR frame -> ego vehicle frame.
        box.rotate(
            pyquaternion.Quaternion(matrix=lidar2ego, rtol=1e-05, atol=1e-07))
        box.translate(lidar2ego[:3, 3])
        # Drop detections beyond the class-specific evaluation radius
        # (measured in the ego frame on the ground plane).
        radius = np.linalg.norm(box.center[:2], 2)
        if radius > cls_range_map[classes[box.label]]:
            continue
        # Ego vehicle frame -> global frame.
        box.rotate(
            pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07))
        box.translate(ego2global[:3, 3])
        kept_boxes.append(box)
    return kept_boxes
ZCMax's avatar
ZCMax committed
676
677


ChaimZhu's avatar
ChaimZhu committed
678
679
680
def cam_nusc_box_to_global(
    info: dict,
    boxes: List[NuScenesBox],
    attrs: np.ndarray,
    classes: List[str],
    eval_configs: DetectionConfig,
    camera_type: str = 'CAM_FRONT',
) -> Tuple[List[NuScenesBox], List[int]]:
    """Convert the box from camera to global coordinate.

    Each box is first moved into the ego-vehicle frame, filtered against the
    per-class evaluation range, and finally moved into the global frame. The
    attribute of a filtered-out box is dropped alongside the box so the two
    returned lists stay aligned.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information (``cam2ego`` per camera and
            ``ego2global`` transform matrices).
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        attrs (np.ndarray): Predicted attributes, one per box.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration
            object providing the per-class detection range.
        camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.

    Returns:
        Tuple[List[:obj:`NuScenesBox`], List[int]]:
        List of standard NuScenesBoxes in the global coordinate and
        attribute label.
    """
    # Transforms are constant for the sample; hoist them out of the loop.
    cam2ego = np.array(info['images'][camera_type]['cam2ego'])
    ego2global = np.array(info['ego2global'])
    cls_range_map = eval_configs.class_range

    box_list = []
    attr_list = []
    for (box, attr) in zip(boxes, attrs):
        # Camera frame -> ego vehicle frame.
        box.rotate(
            pyquaternion.Quaternion(matrix=cam2ego, rtol=1e-05, atol=1e-07))
        box.translate(cam2ego[:3, 3])
        # Drop detections beyond the class-specific evaluation radius
        # (measured in the ego frame on the ground plane).
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Ego vehicle frame -> global frame.
        box.rotate(
            pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07))
        box.translate(ego2global[:3, 3])
        box_list.append(box)
        attr_list.append(attr)
    return box_list, attr_list


def global_nusc_box_to_cam(info: dict, boxes: List[NuScenesBox],
                           classes: List[str],
                           eval_configs: DetectionConfig) -> List[NuScenesBox]:
    """Convert the box from global to camera coordinate.

    This is the inverse of the camera-to-global conversion: each box is
    moved from the global frame into the ego-vehicle frame (translate first,
    then inverse rotation), filtered against the per-class evaluation range,
    and finally moved into the CAM_FRONT camera frame.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information (``ego2global`` and CAM_FRONT
            ``cam2ego`` transform matrices).
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (List[str]): Mapped classes in the evaluation.
        eval_configs (:obj:`DetectionConfig`): Evaluation configuration
            object providing the per-class detection range.

    Returns:
        List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in
        camera coordinate.
    """
    # Transforms are constant for the sample; hoist them out of the loop.
    ego2global = np.array(info['ego2global'])
    cam2ego = np.array(info['images']['CAM_FRONT']['cam2ego'])
    cls_range_map = eval_configs.class_range

    box_list = []
    for box in boxes:
        # Global frame -> ego vehicle frame (undo translate, then rotate by
        # the inverse quaternion).
        box.translate(-ego2global[:3, 3])
        box.rotate(
            pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05,
                                    atol=1e-07).inverse)
        # Drop detections beyond the class-specific evaluation radius
        # (measured in the ego frame on the ground plane).
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Ego vehicle frame -> camera frame.
        box.translate(-cam2ego[:3, 3])
        box.rotate(
            pyquaternion.Quaternion(matrix=cam2ego, rtol=1e-05,
                                    atol=1e-07).inverse)
        box_list.append(box)
    return box_list


766
767
768
def nusc_box_to_cam_box3d(
    boxes: List[NuScenesBox]
) -> Tuple[CameraInstance3DBoxes, torch.Tensor, torch.Tensor]:
    """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`.

    Args:
        boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.

    Returns:
        Tuple[:obj:`CameraInstance3DBoxes`, torch.Tensor, torch.Tensor]:
        Converted 3D bounding boxes, per-class NMS score matrix of shape
        (num_boxes, num_classes + 1) and labels, all on the CUDA device.
    """
    locs = torch.Tensor([b.center for b in boxes]).view(-1, 3)
    dims = torch.Tensor([b.wlh for b in boxes]).view(-1, 3)
    rots = torch.Tensor([b.orientation.yaw_pitch_roll[0]
                         for b in boxes]).view(-1, 1)
    # NuScenesBox velocity is (vx, vy, vz); keep the (vx, vz) components.
    velocity = torch.Tensor([b.velocity[0::2] for b in boxes]).view(-1, 2)

    # Convert nusbox (w, l, h) dims and yaw sign to the cambox convention.
    dims[:, [0, 1, 2]] = dims[:, [1, 2, 0]]
    rots = -rots

    boxes_3d = torch.cat([locs, dims, rots, velocity], dim=1).cuda()
    cam_boxes3d = CameraInstance3DBoxes(
        boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5))
    scores = torch.Tensor([b.score for b in boxes]).cuda()
    labels = torch.LongTensor([b.label for b in boxes]).cuda()
    # Scatter each box score into its class column; the extra column is the
    # background slot expected by box3d_multiclass_nms (nuScenes detection
    # has 10 foreground classes).
    nms_scores = scores.new_zeros(scores.shape[0], 10 + 1)
    indices = labels.new_tensor(list(range(scores.shape[0])))
    nms_scores[indices, labels] = scores
    return cam_boxes3d, nms_scores, labels