# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Callable, List, Optional, Union

import numpy as np
from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset


@DATASETS.register_module()
class WaymoDataset(KittiDataset):
    """Waymo Dataset.

    This class serves as the API for experiments on the Waymo Dataset.

    Please refer to `<https://waymo.com/open/download/>`_ for data downloading.
    It is recommended to symlink the dataset root to $MMDETECTION3D/data and
    organize them as the doc shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        data_prefix (dict): Data prefix for point cloud and
            camera data dict. Defaults to dict(
                                    pts='velodyne',
                                    CAM_FRONT='image_0',
                                    CAM_FRONT_RIGHT='image_1',
                                    CAM_FRONT_LEFT='image_2',
                                    CAM_SIDE_RIGHT='image_3',
                                    CAM_SIDE_LEFT='image_4').
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to [].
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to dict(use_lidar=True).
        default_cam_key (str, optional): Default camera key for lidar2img
            association. Defaults to 'CAM_FRONT'.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        pcd_limit_range (list[float], optional): The range of point cloud
            used to filter invalid predicted boxes.
            Defaults to [0, -40, -3, 70.4, 40, 0.0].
        cam_sync_instances (bool, optional): Whether to use the camera-synced
            labels provided since Waymo dataset version 1.3.1.
            Defaults to False.
        load_interval (int, optional): Interval for frame loading.
            Defaults to 1.
        task (str, optional): Task for 3D detection ('lidar_det' or
            'mono_det').
            'lidar_det': take all the ground truth in the frame.
            'mono_det': take only the ground truth visible in the camera.
            Defaults to 'lidar_det'.
        max_sweeps (int, optional): Maximum number of sweeps for each frame.
            Defaults to 0.
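
    Examples:
        >>> # A minimal usage sketch; the data root and info file below are
        >>> # illustrative placeholders, not assets shipped with the repo.
        >>> dataset = WaymoDataset(  # doctest: +SKIP
        ...     data_root='data/waymo/kitti_format',
        ...     ann_file='waymo_infos_train.pkl',
        ...     modality=dict(use_lidar=True, use_camera=False),
        ...     task='lidar_det')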
    """
    METAINFO = {'CLASSES': ('Car', 'Pedestrian', 'Cyclist')}

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 data_prefix: dict = dict(
                     pts='velodyne',
                     CAM_FRONT='image_0',
                     CAM_FRONT_RIGHT='image_1',
                     CAM_FRONT_LEFT='image_2',
                     CAM_SIDE_RIGHT='image_3',
                     CAM_SIDE_LEFT='image_4'),
                 pipeline: List[Union[dict, Callable]] = [],
                 modality: Optional[dict] = dict(use_lidar=True),
                 default_cam_key: str = 'CAM_FRONT',
                 box_type_3d: str = 'LiDAR',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0],
                 cam_sync_instances: bool = False,
                 load_interval: int = 1,
                 task: str = 'lidar_det',
                 max_sweeps: int = 0,
                 **kwargs):
        self.load_interval = load_interval
        # set loading mode for different task settings
        self.cam_sync_instances = cam_sync_instances
        # construct self.cat_ids for vision-only anns parsing
        self.cat_ids = range(len(self.METAINFO['CLASSES']))
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.max_sweeps = max_sweeps
        self.task = task
        # we do not provide file_client_args to custom_3d init
        # because we want disk loading for info
        # while ceph loading for KITTI2Waymo
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            pcd_limit_range=pcd_limit_range,
            default_cam_key=default_cam_key,
            data_prefix=data_prefix,
            test_mode=test_mode,
            **kwargs)

    def parse_ann_info(self, info: dict) -> dict:
        """Get annotation info according to the given index.

        Args:
            info (dict): Data information of single data sample.

        Returns:
            dict: Annotation information consisting of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                    3D ground truth bboxes.
                - gt_labels_3d (np.ndarray): Labels of 3D ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_bboxes_labels (np.ndarray): Labels of 2D ground truths.
                - centers_2d (np.ndarray): Projected 2D centers of 3D boxes.
                - depths (np.ndarray): Depths of 3D box centers.
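
        Examples:
            >>> # Hypothetical sketch: `dataset` is a constructed
            >>> # WaymoDataset and `info` one frame-level entry of its
            >>> # loaded annotation file.
            >>> ann = dataset.parse_ann_info(info)  # doctest: +SKIP
            >>> sorted(ann.keys())  # doctest: +SKIP
            ['centers_2d', 'depths', 'gt_bboxes', 'gt_bboxes_3d',
             'gt_bboxes_labels', 'gt_labels_3d']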
        """
        ann_info = Det3DDataset.parse_ann_info(self, info)
        if ann_info is None:
            # empty instance
            ann_info = {}
            ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
            ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64)

        ann_info = self._remove_dontcare(ann_info)
        if 'gt_bboxes' in ann_info:
            gt_bboxes = ann_info['gt_bboxes']
            gt_bboxes_labels = ann_info['gt_bboxes_labels']
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_bboxes_labels = np.zeros(0, dtype=np.int64)
        if 'centers_2d' in ann_info:
            centers_2d = ann_info['centers_2d']
            depths = ann_info['depths']
        else:
            centers_2d = np.zeros((0, 2), dtype=np.float32)
            depths = np.zeros(0, dtype=np.float32)

        if self.task == 'mono_det':
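            # the stored mono3d boxes take the gravity center as origin,
            # hence origin=(0.5, 0.5, 0.5) rather than the camera-box
            # default (0.5, 1.0, 0.5)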
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d'],
                box_dim=ann_info['gt_bboxes_3d'].shape[-1],
                origin=(0.5, 0.5, 0.5))
        else:
            # in waymo, lidar2cam = R0_rect @ Tr_velo_to_cam
            # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
            lidar2cam = np.array(
                info['images'][self.default_cam_key]['lidar2cam'])
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
                                                     np.linalg.inv(lidar2cam))
        ann_info['gt_bboxes_3d'] = gt_bboxes_3d

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=ann_info['gt_labels_3d'],
            gt_bboxes=gt_bboxes,
            gt_bboxes_labels=gt_bboxes_labels,
            centers_2d=centers_2d,
            depths=depths)

        return anns_results

    def load_data_list(self) -> List[dict]:
        """Add the load interval."""
        data_list = super().load_data_list()
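        # e.g. load_interval=5 keeps frames 0, 5, 10, ..., i.e. a 20% subset,
        # a common setting for Waymo training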
        data_list = data_list[::self.load_interval]
        return data_list

    def parse_data_info(self, info: dict) -> dict:
        """if task is lidar or multiview det, use super() method elif task is
        mono3d, split the info from frame-wise to img-wise."""
        if self.task != 'mono_det':
            if self.cam_sync_instances:
                # use the cam sync labels
                info['instances'] = info['cam_sync_instances']
            return super().parse_data_info(info)
        else:
            # in the mono3d, the instances is from cam sync.
            data_list = []
            if self.modality['use_lidar']:
                info['lidar_points']['lidar_path'] = osp.join(
                    self.data_prefix.get('pts', ''),
                    info['lidar_points']['lidar_path'])

            if self.modality['use_camera']:
                for cam_key, img_info in info['images'].items():
                    if 'img_path' in img_info:
                        cam_prefix = self.data_prefix.get(cam_key, '')
                        img_info['img_path'] = osp.join(
                            cam_prefix, img_info['img_path'])

            for (cam_key, img_info) in info['images'].items():
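                # each camera view of the frame becomes an independent
                # mono3d sample with its own image-level info dict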
                camera_info = dict()
                camera_info['images'] = dict()
                camera_info['images'][cam_key] = img_info
                if 'cam_instances' in info \
                        and cam_key in info['cam_instances']:
                    camera_info['instances'] = info['cam_instances'][cam_key]
                else:
                    camera_info['instances'] = []
                camera_info['ego2global'] = info['ego2global']
                if 'image_sweeps' in info:
                    camera_info['image_sweeps'] = info['image_sweeps']

                # TODO check if need to modify the sample id
                # TODO check when will use it except for evaluation.
                camera_info['sample_idx'] = info['sample_idx']

                if not self.test_mode:
                    # used in training
                    camera_info['ann_info'] = self.parse_ann_info(camera_info)
                if self.test_mode and self.load_eval_anns:
                    camera_info['eval_ann_info'] = \
                        self.parse_ann_info(camera_info)
                data_list.append(camera_info)
            return data_list