# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Callable, List, Union

import numpy as np

from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset


@DATASETS.register_module()
class WaymoDataset(KittiDataset):
    """Waymo Dataset.

    This class serves as the API for experiments on the Waymo Dataset.

    Please refer to `<https://waymo.com/open/download/>`_ for data downloading.
    It is recommended to symlink the dataset root to $MMDETECTION3D/data and
    organize them as the doc shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        data_prefix (dict): Data prefix for point cloud and
            camera data. Defaults to dict(
                                    pts='velodyne',
                                    CAM_FRONT='image_0',
                                    CAM_FRONT_RIGHT='image_1',
                                    CAM_FRONT_LEFT='image_2',
                                    CAM_SIDE_RIGHT='image_3',
                                    CAM_SIDE_LEFT='image_4')
        pipeline (list[dict]): Pipeline used for data processing.
            Defaults to [].
        modality (dict): Modality to specify the sensor data used
            as input. Defaults to dict(use_lidar=True).
        default_cam_key (str): Default camera key for lidar2img
            association. Defaults to 'CAM_FRONT'.
        box_type_3d (str): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool): Whether to filter the data with empty GT.
            If it is set to True, the example with empty annotations after
            the data pipeline will be dropped and a random example will be
            chosen in `__getitem__`. Defaults to True.
        test_mode (bool): Whether the dataset is in test mode.
            Defaults to False.
        pcd_limit_range (list[float]): The range of point cloud
            used to filter invalid predicted boxes.
            Defaults to [-85, -85, -5, 85, 85, 5].
        cam_sync_instances (bool): Whether to use the camera-synced labels
            supported from Waymo version 1.3.1. Defaults to False.
        load_interval (int): Interval of loading frames. Defaults to 1.
        task (str): Task for 3D detection ('lidar_det' or 'mono_det').
            'lidar_det' takes all the ground truth in the frame, while
            'mono_det' takes only the ground truth visible in the camera.
            Defaults to 'lidar_det'.
        max_sweeps (int): Maximum number of sweeps for each frame.
            Defaults to 0.
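
    Examples:
        A minimal construction sketch; the ``data/waymo/kitti_format`` root
        and the info file name below are illustrative assumptions rather
        than values fixed by this class::

            >>> dataset = WaymoDataset(
            ...     data_root='data/waymo/kitti_format',
            ...     ann_file='waymo_infos_train.pkl',
            ...     pipeline=[],
            ...     modality=dict(use_lidar=True),
            ...     load_interval=5)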
    """
    METAINFO = {'CLASSES': ('Car', 'Pedestrian', 'Cyclist')}

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 data_prefix: dict = dict(
                     pts='velodyne',
                     CAM_FRONT='image_0',
                     CAM_FRONT_RIGHT='image_1',
                     CAM_FRONT_LEFT='image_2',
                     CAM_SIDE_RIGHT='image_3',
                     CAM_SIDE_LEFT='image_4'),
                 pipeline: List[Union[dict, Callable]] = [],
                 modality: dict = dict(use_lidar=True),
                 default_cam_key: str = 'CAM_FRONT',
                 box_type_3d: str = 'LiDAR',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 pcd_limit_range: List[float] = [-85, -85, -5, 85, 85, 5],
                 cam_sync_instances: bool = False,
                 load_interval: int = 1,
                 task: str = 'lidar_det',
                 max_sweeps: int = 0,
                 **kwargs) -> None:
        self.load_interval = load_interval
        # set loading mode for different task settings
        self.cam_sync_instances = cam_sync_instances
        # construct self.cat_ids for vision-only anns parsing
        self.cat_ids = range(len(self.METAINFO['CLASSES']))
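        # cat_ids is a contiguous range over the METAINFO classes, so the
        # cat2label dict below maps each category id to itself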
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.max_sweeps = max_sweeps
        # we do not provide file_client_args to custom_3d init
        # because we want disk loading for info
        # while ceph loading for KITTI2Waymo
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            pcd_limit_range=pcd_limit_range,
            default_cam_key=default_cam_key,
            data_prefix=data_prefix,
            test_mode=test_mode,
            task=task,
            **kwargs)

    def parse_ann_info(self, info: dict) -> dict:
        """Process the `instances` in data info to `ann_info`.

        Args:
            info (dict): Data information of single data sample.

        Returns:
            dict: Annotation information consisting of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                  3D ground truth bboxes.
                - gt_labels_3d (np.ndarray): Labels of 3D ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_bboxes_labels (np.ndarray): Labels of 2D ground truths.
                - centers_2d (np.ndarray): Projected centers of the 3D
                  boxes on the image.
                - depths (np.ndarray): Depths of the projected 3D box
                  centers.
        """
        ann_info = Det3DDataset.parse_ann_info(self, info)
        if ann_info is None:
            # empty instance
            ann_info = {}
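            # (0, 7) matches the (x, y, z, x_size, y_size, z_size, yaw)
            # box encoding used by LiDAR boxes in mmdet3d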
            ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
            ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64)

        ann_info = self._remove_dontcare(ann_info)
        # extract the 2D boxes and labels if they exist
        if 'gt_bboxes' in ann_info:
            gt_bboxes = ann_info['gt_bboxes']
            gt_bboxes_labels = ann_info['gt_bboxes_labels']
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_bboxes_labels = np.zeros(0, dtype=np.int64)
        if 'centers_2d' in ann_info:
            centers_2d = ann_info['centers_2d']
            depths = ann_info['depths']
        else:
            centers_2d = np.zeros((0, 2), dtype=np.float32)
            depths = np.zeros((0, ), dtype=np.float32)

        if self.task == 'mono_det':
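            # mono3d boxes stay in camera coordinates; origin=(0.5, 0.5, 0.5)
            # marks the annotated center as the geometric center of the box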
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d'],
                box_dim=ann_info['gt_bboxes_3d'].shape[-1],
                origin=(0.5, 0.5, 0.5))

        else:
            # in waymo, lidar2cam = R0_rect @ Tr_velo_to_cam
            # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
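            # np.linalg.inv(lidar2cam) gives the cam2lidar transform that
            # `convert_to` applies to the camera-frame boxes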
            lidar2cam = np.array(
                info['images'][self.default_cam_key]['lidar2cam'])
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
                                                     np.linalg.inv(lidar2cam))
        ann_info['gt_bboxes_3d'] = gt_bboxes_3d

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=ann_info['gt_labels_3d'],
            gt_bboxes=gt_bboxes,
            gt_bboxes_labels=gt_bboxes_labels,
            centers_2d=centers_2d,
            depths=depths)

        return anns_results

    def load_data_list(self) -> List[dict]:
        """Add the load interval."""
        data_list = super().load_data_list()
        data_list = data_list[::self.load_interval]
        return data_list

    def parse_data_info(self, info: dict) -> Union[dict, List[dict]]:
        """If the task is lidar or multi-view detection, use the super()
        method; if the task is mono3d, split the frame-wise info into
        img-wise info."""
        if self.task != 'mono_det':
            if self.cam_sync_instances:
                # use the cam sync labels
                info['instances'] = info['cam_sync_instances']
            return super().parse_data_info(info)
        else:
            # in the mono3d, the instances is from cam sync.
            data_list = []
            if self.modality['use_lidar']:
                info['lidar_points']['lidar_path'] = \
                    osp.join(
                        self.data_prefix.get('pts', ''),
                        info['lidar_points']['lidar_path'])

            if self.modality['use_camera']:
                for cam_key, img_info in info['images'].items():
                    if 'img_path' in img_info:
                        cam_prefix = self.data_prefix.get(cam_key, '')
                        img_info['img_path'] = osp.join(
                            cam_prefix, img_info['img_path'])

            for (cam_key, img_info) in info['images'].items():
                camera_info = dict()
                camera_info['images'] = dict()
                camera_info['images'][cam_key] = img_info
                if 'cam_instances' in info \
                        and cam_key in info['cam_instances']:
                    camera_info['instances'] = info['cam_instances'][cam_key]
                else:
                    camera_info['instances'] = []
                camera_info['ego2global'] = info['ego2global']
                if 'image_sweeps' in info:
                    camera_info['image_sweeps'] = info['image_sweeps']

                # TODO check if need to modify the sample id
                # TODO check when will use it except for evaluation.
                camera_info['sample_id'] = info['sample_id']

                if not self.test_mode:
                    # used in training
                    camera_info['ann_info'] = self.parse_ann_info(camera_info)
                if self.test_mode and self.load_eval_anns:
                    # used in evaluation
                    camera_info['eval_ann_info'] = self.parse_ann_info(
                        camera_info)
                data_list.append(camera_info)
            return data_list