import os.path as osp
import tempfile

import mmcv
import numpy as np
import pyquaternion
from nuscenes.utils.data_classes import Box as NuScenesBox

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, LiDARInstance3DBoxes
from .custom_3d import Custom3DDataset


@DATASETS.register_module()
class NuScenesDataset(Custom3DDataset):
    """NuScenes Dataset.

    This class serves as the API for experiments on the nuScenes dataset.

    Args:
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Default: None.
        data_root (str): Path of dataset root. Default: None.
        classes (tuple[str], optional): Classes used in the dataset.
            Default: None.
        load_interval (int): Interval of loading the dataset. It is used to
            uniformly sample the dataset. Default: 1.
        with_velocity (bool): Whether to include velocity prediction into
            the experiments. Default: True.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Default: None.
        box_type_3d (str): Type of 3D box of this dataset. Default: 'LiDAR'.
        filter_empty_gt (bool): Whether to filter empty GT. Default: True.
        test_mode (bool): Whether the dataset is in test mode. Default: False.
        eval_version (str): Configuration version of the nuScenes evaluation.
            Default: 'detection_cvpr_2019'.
    """
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    AttrMapping = {
        'cycle.with_rider': 0,
        'cycle.without_rider': 1,
        'pedestrian.moving': 2,
        'pedestrian.standing': 3,
        'pedestrian.sitting_lying_down': 4,
        'vehicle.moving': 5,
        'vehicle.parked': 6,
        'vehicle.stopped': 7,
    }
    AttrMapping_rev = [
        'cycle.with_rider',
        'cycle.without_rider',
        'pedestrian.moving',
        'pedestrian.standing',
        'pedestrian.sitting_lying_down',
        'vehicle.moving',
        'vehicle.parked',
        'vehicle.stopped',
    ]
    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')
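
    # Example (hedged): NameMapping collapses raw nuScenes category names
    # onto the ten detection classes above, e.g.
    #   NuScenesDataset.NameMapping['human.pedestrian.adult']  # 'pedestrian'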

    def __init__(self,
                 ann_file,
                 pipeline=None,
                 data_root=None,
                 classes=None,
                 load_interval=1,
                 with_velocity=True,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 eval_version='detection_cvpr_2019'):
        self.load_interval = load_interval
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)

        self.with_velocity = with_velocity
        self.eval_version = eval_version
        from nuscenes.eval.detection.config import config_factory
        self.eval_detection_configs = config_factory(self.eval_version)

        if self.modality is None:
            self.modality = dict(
                use_camera=False,
                use_lidar=True,
                use_radar=False,
                use_map=False,
                use_external=False,
            )
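
    # Example (hedged): constructing the dataset directly. The ann_file path
    # below is hypothetical; it must point to an info pkl produced by the
    # nuScenes data-preparation step.
    #   dataset = NuScenesDataset(
    #       ann_file='data/nuscenes/nuscenes_infos_val.pkl',
    #       pipeline=[],
    #       data_root='data/nuscenes/',
    #       test_mode=True)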

    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file.

        Returns:
            list[dict]: List of annotations sorted by timestamps.
        """
        data = mmcv.load(ann_file)
        data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))
        data_infos = data_infos[::self.load_interval]
        self.metadata = data['metadata']
        self.version = self.metadata['version']
        return data_infos
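
    # The loaded info pkl is expected to look roughly like (hedged sketch):
    #   {'infos': [{'token': ..., 'lidar_path': ..., 'timestamp': ..., ...}],
    #    'metadata': {'version': 'v1.0-trainval'}}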

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data
                preprocessing pipelines.
        """
        info = self.data_infos[index]

        # standard protocol modified from SECOND.Pytorch
        input_dict = dict(
            sample_idx=info['token'],
            pts_filename=info['lidar_path'],
            sweeps=info['sweeps'],
            timestamp=info['timestamp'] / 1e6,
        )

        if self.modality['use_camera']:
            image_paths = []
            lidar2img_rts = []
            for cam_type, cam_info in info['cams'].items():
                image_paths.append(cam_info['data_path'])
                # obtain lidar to image transformation matrix
                lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
                lidar2cam_t = cam_info[
                    'sensor2lidar_translation'] @ lidar2cam_r.T
                lidar2cam_rt = np.eye(4)
                lidar2cam_rt[:3, :3] = lidar2cam_r.T
                lidar2cam_rt[3, :3] = -lidar2cam_t
                intrinsic = cam_info['cam_intrinsic']
                viewpad = np.eye(4)
                viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
                lidar2img_rt = (viewpad @ lidar2cam_rt.T)
                lidar2img_rts.append(lidar2img_rt)

            input_dict.update(
                dict(
                    img_filename=image_paths,
                    lidar2img=lidar2img_rts,
                ))

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict
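
    # Example (hedged sketch): projecting a lidar point into an image with
    # one of the 4x4 lidar2img matrices built above; `pt` is a hypothetical
    # homogeneous point in lidar coordinates.
    #   pt = np.array([10.0, 2.0, -1.5, 1.0])
    #   uvw = lidar2img_rt @ pt
    #   u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]  # pixel coordinates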

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information consisting of gt_bboxes_3d,
                gt_labels_3d and gt_names.
        """
        info = self.data_infos[index]
        # filter out bbox containing no points
        mask = info['num_lidar_pts'] > 0
        gt_bboxes_3d = info['gt_boxes'][mask]
        gt_names_3d = info['gt_names'][mask]
        gt_labels_3d = []
        for cat in gt_names_3d:
            if cat in self.CLASSES:
                gt_labels_3d.append(self.CLASSES.index(cat))
            else:
                gt_labels_3d.append(-1)
        gt_labels_3d = np.array(gt_labels_3d)

        if self.with_velocity:
            gt_velocity = info['gt_velocity'][mask]
            nan_mask = np.isnan(gt_velocity[:, 0])
            gt_velocity[nan_mask] = [0.0, 0.0]
            gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)

        # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be
        # the same as KITTI (0.5, 0.5, 0)
        gt_bboxes_3d = LiDARInstance3DBoxes(
            gt_bboxes_3d,
            box_dim=gt_bboxes_3d.shape[-1],
            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            gt_names=gt_names_3d)
        return anns_results

    def _format_bbox(self, results, jsonfile_prefix=None):
        """Convert the detection results to the nuScenes submission format.

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): Prefix of the output jsonfile.

        Returns:
            str: Path of the dumped json file.
        """
        nusc_annos = {}
        mapped_class_names = self.CLASSES

        print('Start to convert detection format...')
        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
            annos = []
            boxes = output_to_nusc_box(det)
            sample_token = self.data_infos[sample_id]['token']
            boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes,
                                             mapped_class_names,
                                             self.eval_detection_configs,
                                             self.eval_version)
            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
                    if name in [
                            'car',
                            'construction_vehicle',
                            'bus',
                            'truck',
                            'trailer',
                    ]:
                        attr = 'vehicle.moving'
                    elif name in ['bicycle', 'motorcycle']:
                        attr = 'cycle.with_rider'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]
                else:
                    if name in ['pedestrian']:
                        attr = 'pedestrian.standing'
                    elif name in ['bus']:
                        attr = 'vehicle.stopped'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]

                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }
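        # The dumped json follows the nuScenes submission layout, roughly
        # (hedged sketch, values abbreviated):
        #   {"meta":    {"use_lidar": true, "use_camera": false, ...},
        #    "results": {"<sample_token>": [{"translation": [...],
        #                                    "size": [...], ...}, ...]}}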

        mmcv.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print('Results written to', res_path)
        mmcv.dump(nusc_submissions, res_path)
        return res_path

    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        """Evaluate a single result file in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root, verbose=False)
        eval_set_map = {
            'v1.0-mini': 'mini_train',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = '{}_NuScenes'.format(result_name)
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail
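
    # Keys in the returned dict look like (hedged example; the distance
    # thresholds come from the nuScenes eval config):
    #   'pts_bbox_NuScenes/car_AP_dist_2.0', 'pts_bbox_NuScenes/NDS', ...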

    def format_results(self, results, jsonfile_prefix=None):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list): Testing results of the dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing
                the json filepaths, tmp_dir is the temporary directory created
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

        if not isinstance(results[0], dict):
            result_files = self._format_bbox(results, jsonfile_prefix)
        else:
            result_files = dict()
            for name in results[0]:
                print(f'\nFormatting bboxes of {name}')
                results_ = [out[name] for out in results]
                tmp_file_ = osp.join(jsonfile_prefix, name)
                result_files.update(
                    {name: self._format_bbox(results_, tmp_file_)})
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 result_names=['pts_bbox'],
                 show=False,
                 out_dir=None):
        """Evaluation in nuScenes protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            result_names (list[str]): Result names in the metric prefix.
                Default: ['pts_bbox'].
            show (bool): Whether to visualize.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        if isinstance(result_files, dict):
            results_dict = dict()
            for name in result_names:
                print('Evaluating bboxes of {}'.format(name))
                ret_dict = self._evaluate_single(result_files[name])
                # update inside the loop so metrics of every result name
                # are kept, not only those of the last one
                results_dict.update(ret_dict)
        elif isinstance(result_files, str):
            results_dict = self._evaluate_single(result_files)

        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show:
            self.show(results, out_dir)
        return results_dict
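
    # Example (hedged): a typical call after inference; `outputs` stands for
    # the per-sample detection list returned by the test loop (the name is
    # hypothetical):
    #   metrics = dataset.evaluate(outputs, jsonfile_prefix='work_dir/res')
    #   print(metrics['pts_bbox_NuScenes/NDS'])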

    def show(self, results, out_dir):
        """Visualize the results.

        Args:
            results (list[dict]): List of bounding box results.
            out_dir (str): Output directory of the visualization results.
        """
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['lidar_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            # nuScenes lidar bins store 5 floats per point
            # (x, y, z, intensity, ring index)
            points = np.fromfile(pts_path, dtype=np.float32).reshape(-1, 5)
            # convert points from lidar to depth coordinates for visualization
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
            # shift box centers from the bottom to the gravity center
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)


def output_to_nusc_box(detection):
    """Convert the detection output to NuScenesBox instances.

    Args:
        detection (dict): Detection results, containing 'boxes_3d'
            (:obj:`LiDARInstance3DBoxes`), 'scores_3d' (torch.Tensor) and
            'labels_3d' (torch.Tensor).

    Returns:
        list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
    """
    box3d = detection['boxes_3d']
    scores = detection['scores_3d'].numpy()
    labels = detection['labels_3d'].numpy()

    box_gravity_center = box3d.gravity_center.numpy()
    box_dims = box3d.dims.numpy()
    box_yaw = box3d.yaw.numpy()
    # TODO: check whether this is necessary
    # with dir_offset & dir_limit in the head
    box_yaw = -box_yaw - np.pi / 2

    box_list = []
    for i in range(len(box3d)):
        quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
        velocity = (*box3d.tensor[i, 7:9], 0.0)
        # velo_val = np.linalg.norm(box3d[i, 7:9])
        # velo_ori = box3d[i, 6]
        # velocity = (
        # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0)
        box = NuScenesBox(
            box_gravity_center[i],
            box_dims[i],
            quat,
            label=labels[i],
            score=scores[i],
            velocity=velocity)
        box_list.append(box)
    return box_list
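

# Example (hedged): converting one per-sample detection dict, with the keys
# consumed above, into NuScenesBox instances:
#   det = dict(boxes_3d=..., scores_3d=..., labels_3d=...)  # from a bbox head
#   boxes = output_to_nusc_box(det)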


def lidar_nusc_box_to_global(info,
                             boxes,
                             classes,
                             eval_configs,
                             eval_version='detection_cvpr_2019'):
    """Convert the boxes from lidar to global coordinates.

    Args:
        info (dict): Info of a specific sample, including the lidar-to-ego
            and ego-to-global transformations.
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (tuple[str]): Mapped class names used in evaluation.
        eval_configs: Evaluation configuration, providing the per-class
            detection range.
        eval_version (str): Evaluation version.
            Default: 'detection_cvpr_2019'.

    Returns:
        list[:obj:`NuScenesBox`]: Boxes in the global coordinate system,
            filtered by the class-specific detection range.
    """
    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
        box.translate(np.array(info['lidar2ego_translation']))
        # filter out detections beyond the class-specific range (ego frame)
        cls_range_map = eval_configs.class_range
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
        box.translate(np.array(info['ego2global_translation']))
        box_list.append(box)
    return box_list