# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.dataset.sampler import DefaultSampler
from mmengine.visualization.vis_backend import LocalVisBackend

from mmdet3d.datasets.nuscenes_dataset import NuScenesDataset
from mmdet3d.datasets.transforms.formating import Pack3DDetInputs
from mmdet3d.datasets.transforms.loading import (LoadAnnotations3D,
                                                 LoadPointsFromFile,
                                                 LoadPointsFromMultiSweeps)
from mmdet3d.datasets.transforms.test_time_aug import MultiScaleFlipAug3D
from mmdet3d.datasets.transforms.transforms_3d import (  # noqa
    GlobalRotScaleTrans, ObjectNameFilter, ObjectRangeFilter, PointShuffle,
    PointsRangeFilter, RandomFlip3D)
from mmdet3d.evaluation.metrics.nuscenes_metric import NuScenesMetric
from mmdet3d.visualization.local_visualizer import Det3DLocalVisualizer

# If the point cloud range is changed, the models should also change their
# point cloud range accordingly.
point_cloud_range = [-50, -50, -5, 50, 50, 3]
# Using calibration info to convert the LiDAR-coordinate point cloud range
# to the ego-coordinate range can bring a small improvement on nuScenes.
# point_cloud_range = [-50, -50.8, -5, 50, 49.2, 3]
# For nuScenes we usually do 10-class detection.
class_names = [
    'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
    'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
metainfo = dict(classes=class_names)
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
# Input modality for the nuScenes dataset; this is consistent with the
# submission format, which requires the information in input_modality.
input_modality = dict(use_lidar=True, use_camera=False)
data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP')

# Examples of using a different file client.
# Method 1: simply set the data root and let the file I/O module infer the
# backend from the prefix (LMDB and Memcached are not supported yet).
# data_root = 's3://openmmlab/datasets/detection3d/nuscenes/'

# Method 2: use backend_args (named file_client_args in versions before
# 1.1.0).
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection3d/',
#         'data/': 's3://openmmlab/datasets/detection3d/'
#     }))
backend_args = None

train_pipeline = [
    dict(
        type=LoadPointsFromFile,
        coord_type='LIDAR',
        load_dim=5,
        use_dim=5,
        backend_args=backend_args),
    dict(
        type=LoadPointsFromMultiSweeps,
        sweeps_num=10,
        backend_args=backend_args),
    dict(type=LoadAnnotations3D, with_bbox_3d=True, with_label_3d=True),
    dict(
        type=GlobalRotScaleTrans,
        rot_range=[-0.3925, 0.3925],
        scale_ratio_range=[0.95, 1.05],
        translation_std=[0, 0, 0]),
    dict(type=RandomFlip3D, flip_ratio_bev_horizontal=0.5),
    dict(type=PointsRangeFilter, point_cloud_range=point_cloud_range),
    dict(type=ObjectRangeFilter, point_cloud_range=point_cloud_range),
    dict(type=ObjectNameFilter, classes=class_names),
    dict(type=PointShuffle),
    dict(
        type=Pack3DDetInputs, keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
    dict(
        type=LoadPointsFromFile,
        coord_type='LIDAR',
        load_dim=5,
        use_dim=5,
        backend_args=backend_args),
    dict(
        type=LoadPointsFromMultiSweeps,
        sweeps_num=10,
        test_mode=True,
        backend_args=backend_args),
    dict(
        type=MultiScaleFlipAug3D,
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type=GlobalRotScaleTrans,
                rot_range=[0, 0],
                scale_ratio_range=[1., 1.],
                translation_std=[0, 0, 0]),
            dict(type=RandomFlip3D),
            dict(type=PointsRangeFilter, point_cloud_range=point_cloud_range)
        ]),
    dict(type=Pack3DDetInputs, keys=['points'])
]
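# A minimal sketch of how the pipeline lists above are consumed, kept as
# comments so that loading this config stays side-effect free. mmengine
# builds each dict through the TRANSFORMS registry and chains the resulting
# transforms; `raw_info` is a hypothetical name for the sample dict returned
# by NuScenesDataset.get_data_info():
#
# from mmengine.dataset import Compose
# pipeline = Compose(train_pipeline)
# packed = pipeline(raw_info)
# # After the pipeline runs, Pack3DDetInputs leaves the batchable tensors
# # under 'inputs' and the annotations under 'data_samples'.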
# Construct a pipeline for data and GT loading in the show function.
# Please keep its loading behavior consistent with test_pipeline (e.g. the
# file client).
eval_pipeline = [
    dict(
        type=LoadPointsFromFile,
        coord_type='LIDAR',
        load_dim=5,
        use_dim=5,
        backend_args=backend_args),
    dict(
        type=LoadPointsFromMultiSweeps,
        sweeps_num=10,
        test_mode=True,
        backend_args=backend_args),
    dict(type=Pack3DDetInputs, keys=['points'])
]
train_dataloader = dict(
    batch_size=4,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type=DefaultSampler, shuffle=True),
    dataset=dict(
        type=NuScenesDataset,
        data_root=data_root,
        ann_file='nuscenes_infos_train.pkl',
        pipeline=train_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        test_mode=False,
        data_prefix=data_prefix,
        # We use box_type_3d='LiDAR' for the KITTI and nuScenes datasets
        # and box_type_3d='Depth' for the SUN RGB-D and ScanNet datasets.
        box_type_3d='LiDAR',
        backend_args=backend_args))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type=DefaultSampler, shuffle=False),
    dataset=dict(
        type=NuScenesDataset,
        data_root=data_root,
        ann_file='nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        data_prefix=data_prefix,
        test_mode=True,
        box_type_3d='LiDAR',
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type=DefaultSampler, shuffle=False),
    dataset=dict(
        type=NuScenesDataset,
        data_root=data_root,
        ann_file='nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        metainfo=metainfo,
        modality=input_modality,
        test_mode=True,
        data_prefix=data_prefix,
        box_type_3d='LiDAR',
        backend_args=backend_args))

val_evaluator = dict(
    type=NuScenesMetric,
    data_root=data_root,
    ann_file=data_root + 'nuscenes_infos_val.pkl',
    metric='bbox',
    backend_args=backend_args)
test_evaluator = val_evaluator

vis_backends = [dict(type=LocalVisBackend)]
visualizer = dict(
    type=Det3DLocalVisualizer, vis_backends=vis_backends, name='visualizer')
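# How a downstream config typically consumes this base file (a sketch, kept
# as comments; the relative import path assumes this file is saved as
# mmdet3d/configs/_base_/datasets/nus_3d.py, and the batch-size override is
# a hypothetical example):
#
# from mmengine.config import read_base
#
# with read_base():
#     from .._base_.datasets.nus_3d import *
#
# # Variables imported from a base config are plain Python objects, so
# # overrides are ordinary dict updates.
# train_dataloader.update(batch_size=2)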