Unverified Commit cf6f4732 authored by Xiang Xu, committed by GitHub

[Feature] File I/O migration and reconstruction (#2319)

* refactor fileio

* update mmengine and mmcv version

* update

* update docs

* update version
parent b2e5ad6b
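
At its core, this migration drops the per-module `FileClient` objects configured through `file_client_args` and calls the stateless `mmengine.fileio` helpers, which take an optional `backend_args` dict (`None` means the local backend). A minimal sketch of the old and new call patterns, using a placeholder path:

# Old pattern: each module instantiated its own FileClient.
from mmengine.fileio import FileClient
file_client = FileClient(**dict(backend='disk'))
pkl_bytes = file_client.get('data/kitti/kitti_infos_val.pkl')  # placeholder path

# New pattern: stateless helpers plus an optional backend_args dict.
from mmengine.fileio import get, load
backend_args = None  # None -> local file system
pkl_bytes = get('data/kitti/kitti_infos_val.pkl', backend_args=backend_args)
infos = load('data/kitti/kitti_infos_val.pkl', backend_args=backend_args)
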
......@@ -41,8 +41,8 @@ class Prediction2Waymo(object):
validation and 2 for testing.
        classes (dict): A list of class names.
        workers (int): Number of parallel processes. Defaults to 2.
file_client_args (str): File client for reading gt in waymo format.
Defaults to ``dict(backend='disk')``.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
        from_kitti_format (bool, optional): Whether the results are in kitti
format. Defaults to False.
idx2metainfo (Optional[dict], optional): The mapping from sample_idx to
......@@ -58,7 +58,7 @@ class Prediction2Waymo(object):
prefix: str,
classes: dict,
workers: int = 2,
file_client_args: dict = dict(backend='disk'),
backend_args: Optional[dict] = None,
from_kitti_format: bool = False,
idx2metainfo: Optional[dict] = None):
......@@ -69,7 +69,7 @@ class Prediction2Waymo(object):
self.prefix = prefix
self.classes = classes
self.workers = int(workers)
self.file_client_args = file_client_args
self.backend_args = backend_args
self.from_kitti_format = from_kitti_format
if idx2metainfo is not None:
self.idx2metainfo = idx2metainfo
......@@ -114,12 +114,12 @@ class Prediction2Waymo(object):
def get_file_names(self):
"""Get file names of waymo raw data."""
if 'path_mapping' in self.file_client_args:
for path in self.file_client_args['path_mapping'].keys():
if 'path_mapping' in self.backend_args:
for path in self.backend_args['path_mapping'].keys():
if path in self.waymo_tfrecords_dir:
self.waymo_tfrecords_dir = \
self.waymo_tfrecords_dir.replace(
path, self.file_client_args['path_mapping'][path])
path, self.backend_args['path_mapping'][path])
from petrel_client.client import Client
client = Client()
contents = client.list(self.waymo_tfrecords_dir)
......
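
The `get_file_names` change above keeps the same `path_mapping` logic, only reading it from `backend_args` now. A standalone sketch of that rewrite, with illustrative local and Ceph paths (the real mapping and bucket names differ):

# Hedged example: rewrite a local tfrecord directory to its Ceph counterpart
# before listing it with the petrel client. Paths are made up.
backend_args = dict(
    backend='petrel',
    path_mapping={'./data/waymo/': 's3://example-bucket/waymo/'})

waymo_tfrecords_dir = './data/waymo/waymo_format/validation/'
if backend_args and 'path_mapping' in backend_args:
    for local_prefix, remote_prefix in backend_args['path_mapping'].items():
        if local_prefix in waymo_tfrecords_dir:
            waymo_tfrecords_dir = waymo_tfrecords_dir.replace(
                local_prefix, remote_prefix)
# waymo_tfrecords_dir == 's3://example-bucket/waymo/waymo_format/validation/'
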
......@@ -47,24 +47,21 @@ class KittiMetric(BaseMetric):
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmengine.fileio.FileClient` for details.
Defaults to dict(backend='disk').
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(
self,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0],
prefix: Optional[str] = None,
pklfile_prefix: Optional[str] = None,
default_cam_key: str = 'CAM2',
format_only: bool = False,
submission_prefix: Optional[str] = None,
collect_device: str = 'cpu',
file_client_args: dict = dict(backend='disk')
) -> None:
def __init__(self,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0],
prefix: Optional[str] = None,
pklfile_prefix: Optional[str] = None,
default_cam_key: str = 'CAM2',
format_only: bool = False,
submission_prefix: Optional[str] = None,
collect_device: str = 'cpu',
backend_args: Optional[dict] = None) -> None:
self.default_prefix = 'Kitti metric'
super(KittiMetric, self).__init__(
collect_device=collect_device, prefix=prefix)
......@@ -80,7 +77,7 @@ class KittiMetric(BaseMetric):
self.submission_prefix = submission_prefix
self.default_cam_key = default_cam_key
self.file_client_args = file_client_args
self.backend_args = backend_args
allowed_metrics = ['bbox', 'img_bbox', 'mAP', 'LET_mAP']
self.metrics = metric if isinstance(metric, list) else [metric]
......@@ -188,7 +185,7 @@ class KittiMetric(BaseMetric):
self.classes = self.dataset_meta['classes']
# load annotations
pkl_infos = load(self.ann_file, file_client_args=self.file_client_args)
pkl_infos = load(self.ann_file, backend_args=self.backend_args)
self.data_infos = self.convert_annos_to_kitti_annos(pkl_infos)
result_dict, tmp_dir = self.format_results(
results,
......
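
For the metric classes the only file access is loading the annotation pickle, so the migration is a one-keyword swap on `mmengine.fileio.load`. A hedged sketch of both the local and the Ceph case (the petrel values are illustrative and require the petrel SDK):

from mmengine.fileio import load

# Default: backend_args=None reads from the local disk.
pkl_infos = load('data/kitti/kitti_infos_val.pkl', backend_args=None)

# Ceph: pass a petrel backend_args dict instead; the bucket path is a placeholder.
ceph_backend_args = dict(
    backend='petrel',
    path_mapping={'data/kitti/': 's3://example-bucket/kitti/'})
pkl_infos = load('data/kitti/kitti_infos_val.pkl',
                 backend_args=ceph_backend_args)
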
......@@ -46,21 +46,19 @@ class LyftMetric(BaseMetric):
'gpu'. Defaults to 'cpu'.
"""
def __init__(
self,
data_root: str,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
modality=dict(
use_camera=False,
use_lidar=True,
),
prefix: Optional[str] = None,
jsonfile_prefix: str = None,
csv_savepath: str = None,
collect_device: str = 'cpu',
file_client_args: dict = dict(backend='disk')
) -> None:
def __init__(self,
data_root: str,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
modality=dict(
use_camera=False,
use_lidar=True,
),
prefix: Optional[str] = None,
jsonfile_prefix: str = None,
csv_savepath: str = None,
collect_device: str = 'cpu',
backend_args: Optional[dict] = None) -> None:
self.default_prefix = 'Lyft metric'
super(LyftMetric, self).__init__(
collect_device=collect_device, prefix=prefix)
......@@ -68,7 +66,7 @@ class LyftMetric(BaseMetric):
self.data_root = data_root
self.modality = modality
self.jsonfile_prefix = jsonfile_prefix
self.file_client_args = file_client_args
self.backend_args = backend_args
self.csv_savepath = csv_savepath
self.metrics = metric if isinstance(metric, list) else [metric]
......@@ -115,7 +113,7 @@ class LyftMetric(BaseMetric):
# load annotations
self.data_infos = load(
self.ann_file, file_client_args=self.file_client_args)['data_list']
self.ann_file, backend_args=self.backend_args)['data_list']
result_dict, tmp_dir = self.format_results(results, classes,
self.jsonfile_prefix)
......
......@@ -47,9 +47,8 @@ class NuScenesMetric(BaseMetric):
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmengine.fileio.FileClient` for details.
Defaults to dict(backend='disk').
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
NameMapping = {
'movable_object.barrier': 'barrier',
......@@ -88,19 +87,17 @@ class NuScenesMetric(BaseMetric):
'attr_err': 'mAAE'
}
def __init__(
self,
data_root: str,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
modality: dict = dict(use_camera=False, use_lidar=True),
prefix: Optional[str] = None,
format_only: bool = False,
jsonfile_prefix: Optional[str] = None,
eval_version: str = 'detection_cvpr_2019',
collect_device: str = 'cpu',
file_client_args: dict = dict(backend='disk')
) -> None:
def __init__(self,
data_root: str,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
modality: dict = dict(use_camera=False, use_lidar=True),
prefix: Optional[str] = None,
format_only: bool = False,
jsonfile_prefix: Optional[str] = None,
eval_version: str = 'detection_cvpr_2019',
collect_device: str = 'cpu',
backend_args: Optional[dict] = None) -> None:
self.default_prefix = 'NuScenes metric'
super(NuScenesMetric, self).__init__(
collect_device=collect_device, prefix=prefix)
......@@ -120,7 +117,7 @@ class NuScenesMetric(BaseMetric):
'the end.'
self.jsonfile_prefix = jsonfile_prefix
self.file_client_args = file_client_args
self.backend_args = backend_args
self.metrics = metric if isinstance(metric, list) else [metric]
......@@ -169,7 +166,7 @@ class NuScenesMetric(BaseMetric):
self.version = self.dataset_meta['version']
# load annotations
self.data_infos = load(
self.ann_file, file_client_args=self.file_client_args)['data_list']
self.ann_file, backend_args=self.backend_args)['data_list']
result_dict, tmp_dir = self.format_results(results, classes,
self.jsonfile_prefix)
......
......@@ -69,8 +69,8 @@ class WaymoMetric(KittiMetric):
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
file_client_args (dict): File client for reading gt in waymo format.
Defaults to ``dict(backend='disk')``.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
idx2metainfo (str, optional): The file path of the metainfo in waymo.
It stores the mapping from sample_idx to metainfo. The metainfo
must contain the keys: 'idx2contextname' and 'idx2timestamp'.
......@@ -94,7 +94,7 @@ class WaymoMetric(KittiMetric):
default_cam_key: str = 'CAM_FRONT',
use_pred_sample_idx: bool = False,
collect_device: str = 'cpu',
file_client_args: dict = dict(backend='disk'),
backend_args: Optional[dict] = None,
idx2metainfo: Optional[str] = None) -> None:
self.waymo_bin_file = waymo_bin_file
self.data_root = data_root
......@@ -117,7 +117,7 @@ class WaymoMetric(KittiMetric):
submission_prefix=submission_prefix,
default_cam_key=default_cam_key,
collect_device=collect_device,
file_client_args=file_client_args)
backend_args=backend_args)
self.format_only = format_only
if self.format_only:
assert pklfile_prefix is not None, 'pklfile_prefix must be '
......@@ -388,7 +388,7 @@ class WaymoMetric(KittiMetric):
waymo_results_final_path,
prefix,
classes,
file_client_args=self.file_client_args,
backend_args=self.backend_args,
from_kitti_format=self.convert_kitti_format,
idx2metainfo=self.idx2metainfo)
converter.convert()
......
......@@ -5,7 +5,7 @@ def replace_ceph_backend(cfg):
cfg_pretty_text = cfg.pretty_text
replace_strs = \
r'''file_client_args = dict(
r'''backend_args = dict(
backend='petrel',
path_mapping=dict({
'./data/DATA/': 's3://openmmlab/datasets/detection3d/CEPH/',
......@@ -49,6 +49,8 @@ def replace_ceph_backend(cfg):
# cfg_pretty_text = cfg_pretty_text.replace(
# 'ann_file', replace_strs + ', ann_file')
cfg_pretty_text = cfg_pretty_text.replace('backend_args=None', '')
# replace LoadImageFromFile
cfg_pretty_text = cfg_pretty_text.replace(
'LoadImageFromFile\'', 'LoadImageFromFile\',' + replace_strs)
......@@ -80,6 +82,18 @@ def replace_ceph_backend(cfg):
cfg_pretty_text = cfg_pretty_text.replace(
'LoadAnnotations3D\'', 'LoadAnnotations3D\',' + replace_strs)
# replace KittiMetric
cfg_pretty_text = cfg_pretty_text.replace('KittiMetric\'',
'KittiMetric\',' + replace_strs)
# replace LyftMetric
cfg_pretty_text = cfg_pretty_text.replace('LyftMetric\'',
'LyftMetric\',' + replace_strs)
# replace NuScenesMetric
cfg_pretty_text = cfg_pretty_text.replace(
'NuScenesMetric\'', 'NuScenesMetric\',' + replace_strs)
# replace WaymoMetric
cfg_pretty_text = cfg_pretty_text.replace('WaymoMetric\'',
'WaymoMetric\',' + replace_strs)
......
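
`replace_ceph_backend` operates on the config's pretty text: it blanks out the default `backend_args=None` entries and splices a petrel `backend_args` dict after each loader and metric type name. A tiny self-contained illustration with a shortened replacement string (the real `replace_strs` is the multi-line petrel dict with `path_mapping` shown above):

# Illustrative only: a one-line stand-in for the real replacement string.
cfg_pretty_text = "dict(type='KittiMetric', backend_args=None, metric='bbox')"
replace_strs = "backend_args=dict(backend='petrel')"

cfg_pretty_text = cfg_pretty_text.replace('backend_args=None', '')
cfg_pretty_text = cfg_pretty_text.replace("KittiMetric'",
                                          "KittiMetric'," + replace_strs)
print(cfg_pretty_text)
# dict(type='KittiMetric',backend_args=dict(backend='petrel'), , metric='bbox')

The real function applies the same `str.replace` calls to the full `cfg.pretty_text`, which is why the hunk above needs one replacement per loader and metric type.
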
......@@ -3,8 +3,8 @@ import copy
from typing import Optional
import mmcv
import mmengine
import numpy as np
from mmengine.fileio import get
from mmdet3d.datasets.transforms import LoadMultiViewImageFromFiles
from mmdet3d.registry import TRANSFORMS
......@@ -23,9 +23,8 @@ class BEVLoadMultiViewImageFromFiles(LoadMultiViewImageFromFiles):
to_float32 (bool): Whether to convert the img to float32.
Defaults to False.
color_type (str): Color type of the file. Defaults to 'unchanged'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmengine.fileio.FileClient` for details.
Defaults to dict(backend='disk').
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
        num_views (int): Number of views in a frame. Defaults to 5.
        num_ref_frames (int): Number of reference frames to load. Defaults to -1.
        test_mode (bool): Whether it is test mode in loading. Defaults to False.
......@@ -160,12 +159,11 @@ class BEVLoadMultiViewImageFromFiles(LoadMultiViewImageFromFiles):
results['ori_cam2img'] = copy.deepcopy(results['cam2img'])
if self.file_client is None:
self.file_client = mmengine.FileClient(**self.file_client_args)
# img is of shape (h, w, c, num_views)
# h and w can be different for different views
img_bytes = [self.file_client.get(name) for name in filename]
img_bytes = [
get(name, backend_args=self.backend_args) for name in filename
]
imgs = [
mmcv.imfrombytes(
img_byte,
......
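
With the `FileClient` attribute gone, the multi-view loader fetches each image through `mmengine.fileio.get` and decodes it with `mmcv.imfrombytes`, mirroring the hunk above. A minimal sketch of that per-view loop (the file names are placeholders, and the decode flag here is an assumption; the real transform passes its own `color_type`):

import mmcv
from mmengine.fileio import get

backend_args = None  # or a petrel dict, as elsewhere in this PR
filename = [
    'data/nuscenes/samples/CAM_FRONT/sample.jpg',  # placeholder paths
    'data/nuscenes/samples/CAM_BACK/sample.jpg',
]

img_bytes = [get(name, backend_args=backend_args) for name in filename]
imgs = [mmcv.imfrombytes(img_byte, flag='color') for img_byte in img_bytes]
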
......@@ -27,7 +27,7 @@ data_prefix = dict(
CAM_BACK_LEFT='samples/CAM_BACK_LEFT',
sweeps='sweeps/LIDAR_TOP')
input_modality = dict(use_lidar=True, use_camera=True)
file_client_args = dict(backend='disk')
backend_args = None
model = dict(
type='BEVFusion',
......@@ -209,20 +209,24 @@ db_sampler = dict(
coord_type='LIDAR',
load_dim=5,
use_dim=[0, 1, 2, 3, 4],
reduce_beams=32))
reduce_beams=32,
backend_args=backend_args),
backend_args=backend_args)
train_pipeline = [
dict(
type='BEVLoadMultiViewImageFromFiles',
to_float32=True,
color_type='color'),
color_type='color',
backend_args=backend_args),
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5,
reduce_beams=32,
load_augmented=None),
load_augmented=None,
backend_args=backend_args),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=9,
......@@ -231,7 +235,8 @@ train_pipeline = [
reduce_beams=32,
pad_empty_sweeps=True,
remove_close=True,
load_augmented=None),
load_augmented=None,
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
......@@ -285,15 +290,22 @@ test_pipeline = [
dict(
type='BEVLoadMultiViewImageFromFiles',
to_float32=True,
color_type='color'),
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
color_type='color',
backend_args=backend_args),
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5,
backend_args=backend_args),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=9,
load_dim=5,
use_dim=5,
pad_empty_sweeps=True,
remove_close=True),
remove_close=True,
backend_args=backend_args),
dict(
type='ImageAug3D',
final_dim=[256, 704],
......@@ -331,7 +343,8 @@ train_dataloader = dict(
data_prefix=data_prefix,
# we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR'))
box_type_3d='LiDAR',
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=0,
......@@ -347,14 +360,16 @@ val_dataloader = dict(
modality=input_modality,
data_prefix=data_prefix,
test_mode=True,
box_type_3d='LiDAR'))
box_type_3d='LiDAR',
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='NuScenesMetric',
data_root=data_root,
ann_file=data_root + 'nuscenes_infos_val.pkl',
metric='bbox')
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
vis_backends = [dict(type='LocalVisBackend')]
......
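
Because the configs now declare `backend_args = None` once and thread it through every loader, dataset and evaluator, moving a whole config to Ceph only means redefining that single variable, which is what the `replace_ceph_backend` helper shown earlier automates. A hedged example of the petrel variant, with an illustrative bucket layout:

# Local setup, as in the config above.
backend_args = None

# Ceph setup: every dict that takes backend_args=backend_args now reads
# through the petrel backend. Bucket paths below are placeholders.
backend_args = dict(
    backend='petrel',
    path_mapping=dict({
        './data/nuscenes/': 's3://example-bucket/nuscenes/',
        'data/nuscenes/': 's3://example-bucket/nuscenes/'
    }))
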
......@@ -13,7 +13,7 @@ class_names = ['Car', 'Pedestrian', 'Cyclist']
tasks = [dict(num_class=3, class_names=['car', 'pedestrian', 'cyclist'])]
metainfo = dict(classes=class_names)
input_modality = dict(use_lidar=True, use_camera=False)
file_client_args = dict(backend='disk')
backend_args = None
model = dict(
type='CenterFormer',
......@@ -120,7 +120,9 @@ db_sampler = dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=6,
use_dim=[0, 1, 2, 3, 4]))
use_dim=[0, 1, 2, 3, 4],
backend_args=backend_args),
backend_args=backend_args)
train_pipeline = [
dict(
......@@ -128,7 +130,8 @@ train_pipeline = [
coord_type='LIDAR',
load_dim=6,
use_dim=5,
norm_intensity=True),
norm_intensity=True,
backend_args=backend_args),
# Add this if using `MultiFrameDeformableDecoderRPN`
# dict(
# type='LoadPointsFromMultiSweeps',
......@@ -160,7 +163,7 @@ test_pipeline = [
load_dim=6,
use_dim=5,
norm_intensity=True,
file_client_args=file_client_args),
backend_args=backend_args),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
......@@ -199,7 +202,7 @@ train_dataloader = dict(
box_type_3d='LiDAR',
# load one frame every five frames
load_interval=5,
file_client_args=file_client_args))
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=1,
......@@ -216,7 +219,7 @@ val_dataloader = dict(
test_mode=True,
metainfo=metainfo,
box_type_3d='LiDAR',
file_client_args=file_client_args))
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
......@@ -224,7 +227,7 @@ val_evaluator = dict(
ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl',
waymo_bin_file='./data/waymo/waymo_format/gt.bin',
data_root='./data/waymo/waymo_format',
file_client_args=file_client_args,
backend_args=backend_args,
convert_kitti_format=False,
idx2metainfo='./data/waymo/waymo_format/idx2metainfo.pkl')
test_evaluator = val_evaluator
......
......@@ -128,9 +128,13 @@ test_transforms = [
]
train_transforms = [dict(type='PhotoMetricDistortion3D')] + test_transforms
file_client_args = dict(backend='disk')
backend_args = None
train_pipeline = [
dict(type='LoadMultiViewImageFromFiles', to_float32=True, num_views=6),
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
num_views=6,
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
......@@ -143,7 +147,11 @@ train_pipeline = [
]
test_pipeline = [
dict(type='LoadMultiViewImageFromFiles', to_float32=True, num_views=6),
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
num_views=6,
backend_args=backend_args),
dict(type='MultiViewWrapper', transforms=test_transforms),
dict(type='Pack3DDetInputs', keys=['img'])
]
......@@ -176,7 +184,8 @@ train_dataloader = dict(
data_prefix=data_prefix,
# we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR'))
box_type_3d='LiDAR',
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
......@@ -194,7 +203,8 @@ val_dataloader = dict(
modality=input_modality,
test_mode=True,
data_prefix=data_prefix,
box_type_3d='LiDAR'))
box_type_3d='LiDAR',
backend_args=backend_args))
test_dataloader = val_dataloader
......@@ -202,7 +212,8 @@ val_evaluator = dict(
type='NuScenesMetric',
data_root=data_root,
ann_file=data_root + 'nuscenes_infos_val.pkl',
metric='bbox')
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
optim_wrapper = dict(
......
......@@ -115,7 +115,7 @@ model = dict(
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
file_client_args = dict(backend='disk')
backend_args = None
db_sampler = dict(
data_root=data_root,
......@@ -150,7 +150,9 @@ db_sampler = dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=[0, 1, 2, 3, 4]))
use_dim=[0, 1, 2, 3, 4],
backend_args=backend_args),
backend_args=backend_args)
ida_aug_conf = {
'resize_lim': (0.47, 0.625),
'final_dim': (320, 800),
......@@ -162,7 +164,10 @@ ida_aug_conf = {
}
train_pipeline = [
dict(type='LoadMultiViewImageFromFiles', to_float32=True),
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
......@@ -187,7 +192,10 @@ train_pipeline = [
])
]
test_pipeline = [
dict(type='LoadMultiViewImageFromFiles', to_float32=True),
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
backend_args=backend_args),
dict(
type='ResizeCropFlipImage', data_aug_conf=ida_aug_conf,
training=False),
......@@ -212,7 +220,8 @@ train_dataloader = dict(
metainfo=metainfo,
test_mode=False,
modality=input_modality,
use_valid_flag=True))
use_valid_flag=True,
backend_args=backend_args))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
......@@ -229,7 +238,8 @@ test_dataloader = dict(
metainfo=metainfo,
test_mode=True,
modality=input_modality,
use_valid_flag=True))
use_valid_flag=True,
backend_args=backend_args))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
......@@ -246,7 +256,8 @@ val_dataloader = dict(
metainfo=metainfo,
test_mode=True,
modality=input_modality,
use_valid_flag=True))
use_valid_flag=True,
backend_args=backend_args))
# Different from original PETR:
# We don't use special lr for image_backbone
......
mmcv>=2.0.0rc4,<2.1.0
mmdet>=3.0.0rc0,<3.1.0
mmengine>=0.4.0,<1.0.0
mmengine>=0.6.0,<1.0.0
mmcv>=2.0.0rc0
mmcv>=2.0.0rc4
mmdet>=3.0.0rc0
mmengine>=0.1.0
mmengine>=0.6.0
torch
torchvision
......@@ -16,12 +16,12 @@ class TestLoadPointsFromFile(unittest.TestCase):
def test_load_points_from_file(self):
use_dim = 3
file_client_args = dict(backend='disk')
backend_args = None
load_points_transform = LoadPointsFromFile(
coord_type='LIDAR',
load_dim=4,
use_dim=use_dim,
file_client_args=file_client_args)
backend_args=backend_args)
data_info = create_dummy_data_info()
info = load_points_transform(data_info)
self.assertIn('points', info)
......@@ -30,7 +30,7 @@ class TestLoadPointsFromFile(unittest.TestCase):
coord_type='DEPTH',
load_dim=4,
use_dim=use_dim,
file_client_args=file_client_args)
backend_args=backend_args)
info = load_points_transform(data_info)
self.assertIsInstance(info['points'], DepthPoints)
self.assertEqual(info['points'].shape[-1], use_dim)
......@@ -39,7 +39,7 @@ class TestLoadPointsFromFile(unittest.TestCase):
load_dim=4,
use_dim=use_dim,
shift_height=True,
file_client_args=file_client_args)
backend_args=backend_args)
info = load_points_transform(data_info)
# extra height dim
self.assertEqual(info['points'].shape[-1], use_dim + 1)
......@@ -53,7 +53,7 @@ class TestLoadPointsFromFile(unittest.TestCase):
class TestLoadAnnotations3D(unittest.TestCase):
def test_load_points_from_file(self):
file_client_args = dict(backend='disk')
backend_args = None
load_anns_transform = LoadAnnotations3D(
with_bbox_3d=True,
......@@ -62,7 +62,7 @@ class TestLoadAnnotations3D(unittest.TestCase):
seg_offset=2**16,
dataset_type='semantickitti',
seg_3d_dtype='np.uint32',
file_client_args=file_client_args)
backend_args=backend_args)
self.assertIs(load_anns_transform.with_seg, False)
self.assertIs(load_anns_transform.with_bbox_3d, True)
self.assertIs(load_anns_transform.with_label_3d, True)
......
......@@ -147,7 +147,7 @@ def create_groundtruth_database(dataset_class_name,
dataset_cfg = dict(
type=dataset_class_name, data_root=data_path, ann_file=info_path)
if dataset_class_name == 'KittiDataset':
file_client_args = dict(backend='disk')
backend_args = None
dataset_cfg.update(
modality=dict(
use_lidar=True,
......@@ -161,12 +161,12 @@ def create_groundtruth_database(dataset_class_name,
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=file_client_args),
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
file_client_args=file_client_args)
backend_args=backend_args)
])
elif dataset_class_name == 'NuScenesDataset':
......@@ -193,7 +193,7 @@ def create_groundtruth_database(dataset_class_name,
])
elif dataset_class_name == 'WaymoDataset':
file_client_args = dict(backend='disk')
backend_args = None
dataset_cfg.update(
test_mode=False,
data_prefix=dict(
......@@ -210,12 +210,12 @@ def create_groundtruth_database(dataset_class_name,
coord_type='LIDAR',
load_dim=6,
use_dim=6,
file_client_args=file_client_args),
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
file_client_args=file_client_args)
backend_args=backend_args)
])
dataset = DATASETS.build(dataset_cfg)
......@@ -510,7 +510,7 @@ class GTDatabaseCreater:
data_root=self.data_path,
ann_file=self.info_path)
if self.dataset_class_name == 'KittiDataset':
file_client_args = dict(backend='disk')
backend_args = None
dataset_cfg.update(
test_mode=False,
data_prefix=dict(
......@@ -527,12 +527,12 @@ class GTDatabaseCreater:
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=file_client_args),
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
file_client_args=file_client_args)
backend_args=backend_args)
])
elif self.dataset_class_name == 'NuScenesDataset':
......@@ -560,7 +560,7 @@ class GTDatabaseCreater:
])
elif self.dataset_class_name == 'WaymoDataset':
file_client_args = dict(backend='disk')
backend_args = None
dataset_cfg.update(
test_mode=False,
data_prefix=dict(
......@@ -579,12 +579,12 @@ class GTDatabaseCreater:
coord_type='LIDAR',
load_dim=6,
use_dim=6,
file_client_args=file_client_args),
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
file_client_args=file_client_args)
backend_args=backend_args)
])
self.dataset = DATASETS.build(dataset_cfg)
......