kitti_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Callable, List, Union

import numpy as np

from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset


@DATASETS.register_module()
class KittiDataset(Det3DDataset):
    r"""KITTI Dataset.

    This class serves as the API for experiments on the `KITTI Dataset
    <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d>`_.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to [].
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to `dict(use_lidar=True)`.
        default_cam_key (str, optional): The default camera name adopted.
            Defaults to 'CAM2'.
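        task (str, optional): Task of this dataset, either 'lidar_det' for
            LiDAR-based 3D detection or 'mono_det' for monocular 3D
            detection. Defaults to 'lidar_det'.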
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options includes:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        pcd_limit_range (list[float], optional): The range of point cloud
            used to filter invalid predicted boxes.
            Defaults to [0, -40, -3, 70.4, 40, 0.0].
    """
    # TODO: use full classes of kitti
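    # (the official KITTI 3D benchmark mainly evaluates Car, Pedestrian
    # and Cyclist)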
    METAINFO = {
        'CLASSES': ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck',
                    'Person_sitting', 'Tram', 'Misc')
    }

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 pipeline: List[Union[dict, Callable]] = [],
                 modality: dict = dict(use_lidar=True),
                 default_cam_key: str = 'CAM2',
                 task: str = 'lidar_det',
                 box_type_3d: str = 'LiDAR',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0],
                 **kwargs) -> None:
        self.pcd_limit_range = pcd_limit_range
        assert task in ('lidar_det', 'mono_det')
        self.task = task
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            modality=modality,
            default_cam_key=default_cam_key,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode,
            **kwargs)
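        # Only LiDAR- and camera-coordinate boxes are supported for KITTI;
        # depth-coordinate boxes are not used for this outdoor dataset.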
        assert self.modality is not None
        assert box_type_3d.lower() in ('lidar', 'camera')

    def parse_data_info(self, info: dict) -> dict:
        """Process the raw data info.

        Compared with `Det3DDataset`, the only difference is the extra
        handling of the ground `plane` information.

        Args:
            info (dict): Raw info dict.

        Returns:
            dict: Processed data information. It contains `ann_info` in the
            training stage, and all paths have been converted to absolute
            paths.
        """
        if self.modality['use_lidar']:
            if 'plane' in info:
                # convert ground plane to velodyne coordinates
                plane = np.array(info['plane'])
                lidar2cam = np.array(
                    info['images']['CAM2']['lidar2cam'], dtype=np.float32)
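                # The camera-frame plane is stored as (a, b, c, d) with
                # a*x + b*y + c*z + d = 0; the code below assumes a unit
                # normal (a, b, c), so -d * (a, b, c) is a point on the
                # plane. Rotate the normal, rigidly transform the point,
                # and rebuild the offset d = -n . p in LiDAR coordinates.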
                reverse = np.linalg.inv(lidar2cam)

                (plane_norm_cam, plane_off_cam) = (plane[:3],
                                                   -plane[:3] * plane[3])
                plane_norm_lidar = reverse[:3, :3] @ plane_norm_cam
                plane_off_lidar = (
                    reverse[:3, :3] @ plane_off_cam + reverse[:3, 3])
                plane_lidar = np.zeros_like(plane_norm_lidar, shape=(4, ))
                plane_lidar[:3] = plane_norm_lidar
                plane_lidar[3] = -plane_norm_lidar.T @ plane_off_lidar
            else:
                plane_lidar = None

            info['plane'] = plane_lidar

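        # For monocular detection, parse the per-camera annotations of the
        # default camera instead of the LiDAR-frame instances.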
        if self.task == 'mono_det':
            info['instances'] = info['cam_instances'][self.default_cam_key]

        info = super().parse_data_info(info)

        return info

    def parse_ann_info(self, info: dict) -> dict:
        """Get annotation info according to the given index.

        Args:
            info (dict): Data information of single data sample.

        Returns:
            dict: Annotation information consisting of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                    3D ground truth bboxes.
                - gt_labels_3d (np.ndarray): Labels of ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_bboxes_labels (np.ndarray): Labels of 2D ground truths.
                - difficulty (int): Difficulty defined by KITTI.
                    0, 1 and 2 represent easy, moderate and hard,
                    respectively.
        """
        ann_info = super().parse_ann_info(info)
        if ann_info is None:
            ann_info = dict()
            # empty instance
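            # (placeholder arrays keep the expected trailing dims, e.g. 7
            # box parameters, so later transforms can handle frames
            # without GT)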
            ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
            ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64)

            if self.task == 'mono_det':
                ann_info['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)
                ann_info['gt_bboxes_labels'] = np.zeros(0, dtype=np.int64)
                ann_info['centers_2d'] = np.zeros((0, 2), dtype=np.float32)
                ann_info['depths'] = np.zeros((0), dtype=np.float32)

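        # KITTI 'DontCare' regions are not evaluated, so drop them before
        # converting the boxes.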
        ann_info = self._remove_dontcare(ann_info)
        # in kitti, lidar2cam = R0_rect @ Tr_velo_to_cam
        lidar2cam = np.array(info['images']['CAM2']['lidar2cam'])
        # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
        gt_bboxes_3d = CameraInstance3DBoxes(
            ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
                                                 np.linalg.inv(lidar2cam))
        ann_info['gt_bboxes_3d'] = gt_bboxes_3d
        return ann_info
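

# A minimal usage sketch (illustrative only): the data root and info file
# name below are assumptions that depend on how the KITTI data was prepared
# with the mmdet3d conversion scripts.
#
#     dataset = KittiDataset(
#         data_root='data/kitti',
#         ann_file='kitti_infos_train.pkl',
#         pipeline=[],
#         modality=dict(use_lidar=True),
#         task='lidar_det')
#     print(len(dataset))
#     info = dataset.get_data_info(0)  # parsed info dict (with 'ann_info')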