waymo_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Callable, List, Optional, Union

import numpy as np

from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset


@DATASETS.register_module()
class WaymoDataset(KittiDataset):
    """Waymo Dataset.

    This class serves as the API for experiments on the Waymo Dataset.

    Please refer to `<https://waymo.com/open/download/>`_ for data downloading.
    It is recommended to symlink the dataset root to $MMDETECTION3D/data and
    organize them as the doc shows.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        data_prefix (dict): Data prefix for the point cloud and
            camera data. Defaults to dict(
                                    pts='velodyne',
                                    CAM_FRONT='image_0',
                                    CAM_FRONT_RIGHT='image_1',
                                    CAM_FRONT_LEFT='image_2',
                                    CAM_SIDE_RIGHT='image_3',
                                    CAM_SIDE_LEFT='image_4').
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to `dict(use_lidar=True)`.
        default_cam_key (str, optional): Default camera key for lidar2img
            association. Defaults to 'CAM_FRONT'.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include:
            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        pcd_limit_range (list, optional): The range of point cloud used to
            filter invalid predicted boxes.
            Defaults to [-85, -85, -5, 85, 85, 5].
        cam_sync_instances (bool, optional): Whether to use the camera-synced
            labels supported from Waymo version 1.3.1. Defaults to False.
        load_interval (int, optional): Interval for loading frames.
            Defaults to 1.
        task (str, optional): Task for 3D detection, 'lidar' or 'mono3d'.
            'lidar': take all the ground truth in the frame.
            'mono3d': take only the ground truth visible in the camera.
            Defaults to 'lidar'.
        max_sweeps (int, optional): Max number of sweeps for each frame.
            Defaults to 0.
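
    Example (a minimal construction sketch; the paths below are placeholders
    assuming the standard ``data/waymo/kitti_format`` layout produced by the
    dataset converter)::

        dataset = WaymoDataset(
            data_root='data/waymo/kitti_format/',
            ann_file='waymo_infos_train.pkl')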
    """
    METAINFO = {'CLASSES': ('Car', 'Pedestrian', 'Cyclist')}

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 data_prefix: dict = dict(
                     pts='velodyne',
                     CAM_FRONT='image_0',
                     CAM_FRONT_RIGHT='image_1',
                     CAM_FRONT_LEFT='image_2',
                     CAM_SIDE_RIGHT='image_3',
                     CAM_SIDE_LEFT='image_4'),
                 pipeline: List[Union[dict, Callable]] = [],
                 modality: Optional[dict] = dict(use_lidar=True),
                 default_cam_key: str = 'CAM_FRONT',
                 box_type_3d: str = 'LiDAR',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 pcd_limit_range: List[float] = [-85, -85, -5, 85, 85, 5],
                 cam_sync_instances: bool = False,
                 load_interval: int = 1,
                 task: str = 'lidar',
                 max_sweeps: int = 0,
                 **kwargs):
        self.load_interval = load_interval
        # set loading mode for different task settings
        self.cam_sync_instances = cam_sync_instances
        # construct self.cat_ids for vision-only anns parsing
        self.cat_ids = range(len(self.METAINFO['CLASSES']))
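        # with the default METAINFO these are identity mappings over {0, 1, 2}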
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.max_sweeps = max_sweeps
        self.task = task
        # we do not provide file_client_args to the custom_3d init
        # because we want disk loading for the info files,
        # while ceph loading is used for KITTI2Waymo
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            pcd_limit_range=pcd_limit_range,
            default_cam_key=default_cam_key,
            data_prefix=data_prefix,
            test_mode=test_mode,
            **kwargs)

    def parse_ann_info(self, info: dict) -> dict:
        """Get annotation info according to the given index.

        Args:
            info (dict): Data information of single data sample.

        Returns:
            dict: Annotation information consisting of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes` |
                    :obj:`CameraInstance3DBoxes`): 3D ground truth bboxes
                    (camera boxes when the task is 'mono3d').
                - gt_labels_3d (np.ndarray): Labels of 3D ground truths.
                - gt_bboxes (np.ndarray): 2D ground truth bboxes.
                - gt_labels (np.ndarray): Labels of 2D ground truths.
                - centers_2d (np.ndarray): Projected centers of 3D boxes
                    on the images.
                - depths (np.ndarray): Depths of the projected box centers.
        """
        ann_info = Det3DDataset.parse_ann_info(self, info)
        if ann_info is None:
            # empty instance
            anns_results = {}
            anns_results['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
            anns_results['gt_labels_3d'] = np.zeros(0, dtype=np.int64)
            return anns_results

        ann_info = self._remove_dontcare(ann_info)
        # in kitti, lidar2cam = R0_rect @ Tr_velo_to_cam
        # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
        if 'gt_bboxes' in ann_info:
            gt_bboxes = ann_info['gt_bboxes']
            gt_labels = ann_info['gt_labels']
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if 'centers_2d' in ann_info:
            centers_2d = ann_info['centers_2d']
            depths = ann_info['depths']
        else:
            centers_2d = np.zeros((0, 2), dtype=np.float32)
            depths = np.zeros((0), dtype=np.float32)

        if self.task == 'mono3d':
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d'],
                box_dim=ann_info['gt_bboxes_3d'].shape[-1],
                origin=(0.5, 0.5, 0.5))
        else:
            lidar2cam = np.array(
                info['images'][self.default_cam_key]['lidar2cam'])

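            # KITTI-format infos store the 3D boxes in camera coordinates;
            # the inverse extrinsic (cam2lidar) maps them back to LiDAR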
            gt_bboxes_3d = CameraInstance3DBoxes(
                ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
                                                     np.linalg.inv(lidar2cam))

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=ann_info['gt_labels_3d'],
            gt_bboxes=gt_bboxes,
            gt_labels=gt_labels,
            centers_2d=centers_2d,
            depths=depths)

        return anns_results

    def load_data_list(self) -> List[dict]:
        """Add the load interval."""
        data_list = super().load_data_list()
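        # e.g. ``load_interval=5`` keeps every 5th frame, which is commonly
        # used to train on a fifth of the Waymo training frames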
        data_list = data_list[::self.load_interval]
        return data_list

    def parse_data_info(self, info: dict) -> dict:
        """if task is lidar or multiview det, use super() method elif task is
        mono3d, split the info from frame-wise to img-wise."""
        if self.task != 'mono3d':
            if self.cam_sync_instances:
                # use the cam sync labels
                info['instances'] = info['cam_sync_instances']
            return super().parse_data_info(info)
        else:
            # in mono3d, the instances come from the cam-synced labels.
            data_list = []
            if self.modality['use_lidar']:
                info['lidar_points']['lidar_path'] = osp.join(
                    self.data_prefix.get('pts', ''),
                    info['lidar_points']['lidar_path'])

            if self.modality['use_camera']:
                for cam_key, img_info in info['images'].items():
                    if 'img_path' in img_info:
                        cam_prefix = self.data_prefix.get(cam_key, '')
                        img_info['img_path'] = osp.join(
                            cam_prefix, img_info['img_path'])

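            # build one data sample per camera view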
            for (cam_key, img_info) in info['images'].items():
                camera_info = dict()
                camera_info['images'] = dict()
                camera_info['images'][cam_key] = img_info
                if 'cam_instances' in info \
                        and cam_key in info['cam_instances']:
                    camera_info['instances'] = info['cam_instances'][cam_key]
                else:
                    camera_info['instances'] = []
                camera_info['ego2global'] = info['ego2global']
                if 'image_sweeps' in info:
                    camera_info['image_sweeps'] = info['image_sweeps']

                # TODO check if need to modify the sample id
                # TODO check when will use it except for evaluation.
                camera_info['sample_id'] = info['sample_id']

                if not self.test_mode:
                    # used in training
                    camera_info['ann_info'] = self.parse_ann_info(camera_info)
                if self.test_mode and self.load_eval_anns:
                    camera_info['eval_ann_info'] = \
                        self.parse_ann_info(camera_info)
                data_list.append(camera_info)
            return data_list
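
# A minimal registry-based construction sketch (values below are
# placeholders, assuming info files generated by the mmdet3d Waymo
# converter):
#
#   cfg = dict(
#       type='WaymoDataset',
#       data_root='data/waymo/kitti_format/',
#       ann_file='waymo_infos_val.pkl',
#       modality=dict(use_lidar=False, use_camera=True),
#       task='mono3d',
#       cam_sync_instances=True)
#   dataset = DATASETS.build(cfg)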