Commit 24cde1eb authored by liyinhao

add scannet and sunrgbd to create data

add assertion
parent 97e4aeb7
tools/create_data.py
@@ -3,6 +3,8 @@ import os.path as osp
 import tools.data_converter.kitti_converter as kitti
 import tools.data_converter.nuscenes_converter as nuscenes_converter
+import tools.data_converter.scannet_converter as scannet
+import tools.data_converter.sunrgbd_converter as sunrgbd
 from tools.data_converter.create_gt_database import create_groundtruth_database
@@ -43,6 +45,14 @@ def nuscenes_data_prep(root_path,
         '{}/{}_infos_train.pkl'.format(out_dir, info_prefix))
+def scannet_data_prep(root_path, info_prefix, out_dir):
+    scannet.create_scannet_info_file(root_path, info_prefix, out_dir)
+def sunrgbd_data_prep(root_path, info_prefix, out_dir):
+    sunrgbd.create_sunrgbd_info_file(root_path, info_prefix, out_dir)
 parser = argparse.ArgumentParser(description='Data converter arg parser')
 parser.add_argument('dataset', metavar='kitti', help='name of the dataset')
 parser.add_argument(
@@ -104,3 +114,13 @@ if __name__ == '__main__':
             dataset_name='NuScenesDataset',
             out_dir=args.out_dir,
             max_sweeps=args.max_sweeps)
+    elif args.dataset == 'scannet':
+        scannet_data_prep(
+            root_path=args.root_path,
+            info_prefix=args.extra_tag,
+            out_dir=args.out_dir)
+    elif args.dataset == 'sunrgbd':
+        sunrgbd_data_prep(
+            root_path=args.root_path,
+            info_prefix=args.extra_tag,
+            out_dir=args.out_dir)
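The two new prep helpers simply forward their arguments to the converter modules imported above, so they can also be exercised directly from Python. A minimal sketch, assuming the dataset roots exist; the ./data/... paths are placeholders and not part of the commit:

    # Sketch (not part of the commit): driving the new converters directly.
    # The ./data/... paths are placeholders for wherever the raw datasets live.
    import tools.data_converter.scannet_converter as scannet
    import tools.data_converter.sunrgbd_converter as sunrgbd

    scannet.create_scannet_info_file('./data/scannet', 'scannet', './data/scannet')
    sunrgbd.create_sunrgbd_info_file('./data/sunrgbd', 'sunrgbd', './data/sunrgbd')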
tools/data_converter/scannet_converter.py
+import os
 import pickle
 from pathlib import Path
 from tools.data_converter.scannet_data_utils import ScannetObject
-def create_scannet_info_file(data_path,
-                             pkl_prefix='scannet_',
-                             save_path=None,
-                             relative_path=True):
+def create_scannet_info_file(data_path, pkl_prefix='scannet', save_path=None):
+    assert os.path.exists(data_path)
     if save_path is None:
         save_path = Path(data_path)
     else:
         save_path = Path(save_path)
+    assert os.path.exists(save_path)
     train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
     val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
     dataset = ScannetObject(root_path=data_path, split='train')
...
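With the new assertions (the "add assertion" part of the commit message), a bad data_path or save_path now fails immediately instead of partway through info generation. A small sketch of the resulting behaviour, using a deliberately nonexistent placeholder path:

    # Sketch (not part of the commit): the added assert fires before anything is written.
    import tools.data_converter.scannet_converter as scannet

    try:
        scannet.create_scannet_info_file('/nonexistent/scannet', 'scannet', '/tmp')
    except AssertionError:
        print('data_path does not exist; no info files were generated')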
tools/data_converter/sunrgbd_converter.py
+import os
 import pickle
 from pathlib import Path
@@ -5,16 +6,18 @@ from tools.data_converter.sunrgbd_data_utils import SUNRGBDObject
 def create_sunrgbd_info_file(data_path,
-                             pkl_prefix='sunrgbd_',
+                             pkl_prefix='sunrgbd',
                              save_path=None,
-                             relative_path=True):
+                             use_v1=False):
+    assert os.path.exists(data_path)
     if save_path is None:
         save_path = Path(data_path)
     else:
         save_path = Path(save_path)
+    assert os.path.exists(save_path)
     train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
     val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
-    dataset = SUNRGBDObject(root_path=data_path, split='train')
+    dataset = SUNRGBDObject(root_path=data_path, split='train', use_v1=use_v1)
     train_split, val_split = 'train', 'val'
     dataset.set_split(train_split)
...
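create_sunrgbd_info_file replaces the relative_path argument with use_v1, which is forwarded to SUNRGBDObject and presumably selects the original v1 SUN RGB-D label set; the pkl_prefix default also loses its trailing underscore, so the default output becomes sunrgbd_infos_train.pkl rather than sunrgbd__infos_train.pkl. A sketch of both call styles, with placeholder paths not taken from the commit:

    # Sketch (not part of the commit): generating SUN RGB-D info files with the
    # default settings and with use_v1=True; the data root is a placeholder.
    import tools.data_converter.sunrgbd_converter as sunrgbd

    sunrgbd.create_sunrgbd_info_file('./data/sunrgbd', 'sunrgbd', './data/sunrgbd')
    sunrgbd.create_sunrgbd_info_file('./data/sunrgbd', 'sunrgbd', './data/sunrgbd',
                                     use_v1=True)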
tools/data_converter/sunrgbd_data_utils.py
@@ -141,10 +141,12 @@ class SUNRGBDObject(object):
         info = dict()
         pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
         info['point_cloud'] = pc_info
+        img_name = os.path.join(self.image_dir, '%06d.jpg' % (sample_idx))
+        img_path = os.path.join(self.image_dir, img_name)
         image_info = {
             'image_idx': sample_idx,
-            'image_shape': self.get_image_shape(sample_idx)
+            'image_shape': self.get_image_shape(sample_idx),
+            'image_path': img_path
         }
         info['image'] = image_info
...
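The extra image_path entry lets downstream code locate the RGB image straight from the generated info files. A sketch of reading it back, assuming the converter pickles a list of these per-sample info dicts and using a placeholder path for the output file:

    # Sketch (not part of the commit): inspecting the new 'image_path' field.
    import pickle

    with open('./data/sunrgbd/sunrgbd_infos_train.pkl', 'rb') as f:
        infos = pickle.load(f)  # assumed to be a list of per-sample info dicts

    for info in infos[:3]:
        print(info['image']['image_idx'], info['image']['image_path'])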