# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Callable, List, Union

import numpy as np

from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset
@DATASETS.register_module()
class WaymoDataset(KittiDataset):
    """Waymo Dataset.

    This class serves as the API for experiments on the Waymo Dataset.

    Please refer to `<https://waymo.com/open/download/>`_for data downloading.
    It is recommended to symlink the dataset root to $MMDETECTION3D/data and
    organize them as the doc shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        data_prefix (dict): data prefix for point cloud and
            camera data dict. Defaults to dict(
                                    pts='velodyne',
                                    CAM_FRONT='image_0',
                                    CAM_FRONT_LEFT='image_1',
                                    CAM_FRONT_RIGHT='image_2',
                                    CAM_SIDE_LEFT='image_3',
                                    CAM_SIDE_RIGHT='image_4')
        pipeline (List[dict]): Pipeline used for data processing.
            Defaults to [].
        modality (dict): Modality to specify the sensor data used
            as input. Defaults to dict(use_lidar=True).
        default_cam_key (str): Default camera key for lidar2img
            association. Defaults to 'CAM_FRONT'.
        box_type_3d (str): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format then converted them to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options includes:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        load_type (str): Type of loading mode. Defaults to 'frame_based'.

            - 'frame_based': Load all of the instances in the frame.
            - 'mv_image_based': Load all of the instances in the frame and need
                to convert to the FOV-based data type to support image-based
                detector.
            - 'fov_image_based': Only load the instances inside the default
                cam, and need to convert to the FOV-based data type to support
                image-based detector.
        filter_empty_gt (bool): Whether to filter the data with empty GT.
            If it's set to be True, the example with empty annotations after
            data pipeline will be dropped and a random example will be chosen
            in `__getitem__`. Defaults to True.
        test_mode (bool): Whether the dataset is in test mode.
            Defaults to False.
        pcd_limit_range (List[float]): The range of point cloud
            used to filter invalid predicted boxes.
            Defaults to [-85, -85, -5, 85, 85, 5].
        cam_sync_instances (bool): If use the camera sync label
            supported from waymo version 1.3.1. Defaults to False.
        load_interval (int): load frame interval. Defaults to 1.
        max_sweeps (int): max sweep for each frame. Defaults to 0.
    """
    METAINFO = {
        'classes': ('Car', 'Pedestrian', 'Cyclist'),
        'palette': [
            (0, 120, 255),  # Waymo Blue
            (0, 232, 157),  # Waymo Green
            (255, 205, 85)  # Amber
        ]
    }

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 data_prefix: dict = dict(
                     pts='velodyne',
                     CAM_FRONT='image_0',
                     CAM_FRONT_LEFT='image_1',
                     CAM_FRONT_RIGHT='image_2',
                     CAM_SIDE_LEFT='image_3',
                     CAM_SIDE_RIGHT='image_4'),
                 pipeline: List[Union[dict, Callable]] = [],
                 modality: dict = dict(use_lidar=True),
                 default_cam_key: str = 'CAM_FRONT',
                 box_type_3d: str = 'LiDAR',
                 load_type: str = 'frame_based',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 pcd_limit_range: List[float] = [-85, -85, -5, 85, 85, 5],
                 cam_sync_instances: bool = False,
                 load_interval: int = 1,
                 max_sweeps: int = 0,
                 **kwargs) -> None:
        self.load_interval = load_interval
        # set loading mode for different task settings
        self.cam_sync_instances = cam_sync_instances
        # construct self.cat_ids for vision-only anns parsing
        self.cat_ids = range(len(self.METAINFO['classes']))
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.max_sweeps = max_sweeps
        # we do not provide backend_args to custom_3d init
        # because we want disk loading for info
        # while ceph loading for Prediction2Waymo
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            pcd_limit_range=pcd_limit_range,
            default_cam_key=default_cam_key,
            data_prefix=data_prefix,
            test_mode=test_mode,
            load_type=load_type,
            **kwargs)

    def parse_ann_info(self, info: dict) -> dict:
        """Process the `instances` in data info to `ann_info`.

        Args:
            info (dict): Data information of single data sample.

        Returns:
            dict: Annotation information consists of the following keys:

                - bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                  3D ground truth bboxes.
                - bbox_labels_3d (np.ndarray): Labels of ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_labels (np.ndarray): Labels of ground truths.
                - difficulty (int): Difficulty defined by KITTI.
                  0, 1, 2 represent easy, moderate and hard levels
                  respectively.
        """
        ann_info = Det3DDataset.parse_ann_info(self, info)
        if ann_info is None:
            # empty instance: keep downstream code shape-safe with
            # zero-length arrays instead of None
            ann_info = {}
            ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
            ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64)

        ann_info = self._remove_dontcare(ann_info)
        if 'gt_bboxes' in ann_info:
            gt_bboxes = ann_info['gt_bboxes']
            gt_bboxes_labels = ann_info['gt_bboxes_labels']
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_bboxes_labels = np.zeros(0, dtype=np.int64)
        if 'centers_2d' in ann_info:
            centers_2d = ann_info['centers_2d']
            depths = ann_info['depths']
        else:
            centers_2d = np.zeros((0, 2), dtype=np.float32)
            depths = np.zeros((0), dtype=np.float32)

        # in waymo, lidar2cam = R0_rect @ Tr_velo_to_cam
        # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
        # NOTE(review): in the mono3d path this method receives a per-camera
        # info whose 'images' may hold only one non-default cam_key; this
        # lookup assumes default_cam_key is always present — verify callers.
        lidar2cam = np.array(info['images'][self.default_cam_key]['lidar2cam'])
        gt_bboxes_3d = CameraInstance3DBoxes(
            ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
                                                 np.linalg.inv(lidar2cam))
        ann_info['gt_bboxes_3d'] = gt_bboxes_3d

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=ann_info['gt_labels_3d'],
            gt_bboxes=gt_bboxes,
            gt_bboxes_labels=gt_bboxes_labels,
            centers_2d=centers_2d,
            depths=depths)
        return anns_results

    def load_data_list(self) -> List[dict]:
        """Add the load interval."""
        data_list = super().load_data_list()
        data_list = data_list[::self.load_interval]
        return data_list

    def parse_data_info(self, info: dict) -> Union[dict, List[dict]]:
        """if task is lidar or multiview det, use super() method elif task is
        mono3d, split the info from frame-wise to img-wise."""
        if self.cam_sync_instances:
            info['instances'] = info['cam_sync_instances']

        if self.load_type == 'frame_based':
            return super().parse_data_info(info)
        elif self.load_type == 'fov_image_based':
            # only loading the fov image and the fov instance
            new_image_info = {}
            new_image_info[self.default_cam_key] = \
                info['images'][self.default_cam_key]
            info['images'] = new_image_info
            info['instances'] = info['cam_instances'][self.default_cam_key]
            return super().parse_data_info(info)
        else:
            # in the mono3d, the instances is from cam sync.
            data_list = []
            if self.modality['use_lidar']:
                info['lidar_points']['lidar_path'] =  \
                    osp.join(
                        self.data_prefix.get('pts', ''),
                        info['lidar_points']['lidar_path'])

            if self.modality['use_camera']:
                for cam_key, img_info in info['images'].items():
                    if 'img_path' in img_info:
                        cam_prefix = self.data_prefix.get(cam_key, '')
                        img_info['img_path'] = osp.join(
                            cam_prefix, img_info['img_path'])

            for (cam_key, img_info) in info['images'].items():
                camera_info = dict()
                camera_info['images'] = dict()
                camera_info['images'][cam_key] = img_info
                if 'cam_instances' in info \
                        and cam_key in info['cam_instances']:
                    camera_info['instances'] = info['cam_instances'][cam_key]
                else:
                    camera_info['instances'] = []
                camera_info['ego2global'] = info['ego2global']
                if 'image_sweeps' in info:
                    camera_info['image_sweeps'] = info['image_sweeps']

                # TODO check if need to modify the sample id
                # TODO check when will use it except for evaluation.
                camera_info['sample_idx'] = info['sample_idx']

                if not self.test_mode:
                    # used in training
                    camera_info['ann_info'] = self.parse_ann_info(camera_info)
                if self.test_mode and self.load_eval_anns:
                    # BUGFIX: previously wrote eval anns onto the shared
                    # frame-level `info` (overwritten every loop iteration);
                    # each per-camera sample must carry its own eval anns.
                    camera_info['eval_ann_info'] = \
                        self.parse_ann_info(camera_info)
                data_list.append(camera_info)
            return data_list