Commit e0d892c7 authored by liyinhao's avatar liyinhao
Browse files

Merge branch 'master_temp' into indoor_loading

# Conflicts:
#	tools/data_converter/sunrgbd_data_utils.py
parents 929ebfe8 f584b970
...@@ -6,8 +6,8 @@ from mmdet3d.ops.roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_cpu, ...@@ -6,8 +6,8 @@ from mmdet3d.ops.roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_cpu,
def test_RoIAwarePool3d(): def test_RoIAwarePool3d():
if not torch.cuda.is_available( # RoIAwarePool3d only support gpu version currently.
): # RoIAwarePool3d only support gpu version currently. if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda') pytest.skip('test requires GPU and torch+cuda')
roiaware_pool3d_max = RoIAwarePool3d( roiaware_pool3d_max = RoIAwarePool3d(
out_size=4, max_pts_per_voxel=128, mode='max') out_size=4, max_pts_per_voxel=128, mode='max')
...@@ -19,23 +19,10 @@ def test_RoIAwarePool3d(): ...@@ -19,23 +19,10 @@ def test_RoIAwarePool3d():
dtype=torch.float32).cuda( dtype=torch.float32).cuda(
) # boxes (m, 7) with bottom center in lidar coordinate ) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor( pts = torch.tensor(
[ [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[1, 2, 3.3], [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[1.2, 2.5, 3.0], [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
[0.8, 2.1, 3.5], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
[1.6, 2.6, 3.6],
[0.8, 1.2, 3.9],
[-9.2, 21.0, 18.2],
[3.8, 7.9, 6.3],
[4.7, 3.5, -12.2],
[3.8, 7.6, -2],
[-10.6, -12.9, -20],
[-16, -18, 9],
[-21.3, -52, -5],
[0, 0, 0],
[6, 7, 8],
[-2, -3, -4],
],
dtype=torch.float32).cuda() # points (n, 3) in lidar coordinate dtype=torch.float32).cuda() # points (n, 3) in lidar coordinate
pts_feature = pts.clone() pts_feature = pts.clone()
...@@ -83,23 +70,10 @@ def test_points_in_boxes_cpu(): ...@@ -83,23 +70,10 @@ def test_points_in_boxes_cpu():
dtype=torch.float32 dtype=torch.float32
) # boxes (m, 7) with bottom center in lidar coordinate ) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor( pts = torch.tensor(
[ [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[1, 2, 3.3], [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[1.2, 2.5, 3.0], [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
[0.8, 2.1, 3.5], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
[1.6, 2.6, 3.6],
[0.8, 1.2, 3.9],
[-9.2, 21.0, 18.2],
[3.8, 7.9, 6.3],
[4.7, 3.5, -12.2],
[3.8, 7.6, -2],
[-10.6, -12.9, -20],
[-16, -18, 9],
[-21.3, -52, -5],
[0, 0, 0],
[6, 7, 8],
[-2, -3, -4],
],
dtype=torch.float32) # points (n, 3) in lidar coordinate dtype=torch.float32) # points (n, 3) in lidar coordinate
point_indices = points_in_boxes_cpu(points=pts, boxes=boxes) point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
...@@ -109,9 +83,3 @@ def test_points_in_boxes_cpu(): ...@@ -109,9 +83,3 @@ def test_points_in_boxes_cpu():
dtype=torch.int32) dtype=torch.int32)
assert point_indices.shape == torch.Size([2, 15]) assert point_indices.shape == torch.Size([2, 15])
assert (point_indices == expected_point_indices).all() assert (point_indices == expected_point_indices).all()
# Ad-hoc entry point: run the ops tests directly without invoking pytest.
if __name__ == '__main__':
    test_points_in_boxes_cpu()
    test_points_in_boxes_gpu()
    test_RoIAwarePool3d()
import pytest
import torch
def test_PointwiseSemanticHead():
    """Smoke-test PointwiseSemanticHead: forward, get_targets and loss."""
    # The head only has a CUDA implementation at the moment.
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    from mmdet3d.models.builder import build_head

    head_cfg = dict(
        type='PointwiseSemanticHead',
        in_channels=8,
        extra_width=0.2,
        seg_score_thr=0.3,
        num_classes=3,
        loss_seg=dict(
            type='FocalLoss',
            use_sigmoid=True,
            reduction='sum',
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_part=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    head = build_head(head_cfg)
    head.cuda()

    # Forward pass: the three outputs must match the expected shapes.
    voxel_features = torch.rand([4, 8], dtype=torch.float32).cuda()
    feats_dict = head.forward(voxel_features)
    num_voxels = voxel_features.shape[0]
    assert feats_dict['seg_preds'].shape == torch.Size([num_voxels, 1])
    assert feats_dict['part_preds'].shape == torch.Size([num_voxels, 3])
    assert feats_dict['part_feats'].shape == torch.Size([num_voxels, 4])

    voxel_centers = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306],
         [6.8162713, -2.480431, -1.3616394],
         [11.643568, -4.744306, -1.3580885],
         [23.482342, 6.5036807, 0.5806964]],
        dtype=torch.float32).cuda()  # (n, point_features)
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32).cuda()  # (n, 4): batch, ind_x, ind_y, ind_z
    voxel_dict = dict(voxel_centers=voxel_centers, coors=coordinates)
    gt_bboxes = list(
        torch.tensor(
            [[[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
             [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]]],
            dtype=torch.float32).cuda())
    gt_labels = list(torch.tensor([[0], [1]], dtype=torch.int64).cuda())

    # Target generation: one segmentation target per voxel and a 3-dim
    # part-location target per voxel.
    target_dict = head.get_targets(voxel_dict, gt_bboxes, gt_labels)
    assert target_dict['seg_targets'].shape == torch.Size([num_voxels])
    assert target_dict['part_targets'].shape == torch.Size([num_voxels, 3])

    # Loss: segmentation loss is positive; none of the voxel centers above
    # fall inside the gt boxes, so the part loss must be exactly zero.
    loss_dict = head.loss(feats_dict['seg_preds'], feats_dict['part_preds'],
                          target_dict['seg_targets'],
                          target_dict['part_targets'])
    assert loss_dict['loss_seg'] > 0
    assert loss_dict['loss_part'] == 0  # no points in gt_boxes
    total_loss = loss_dict['loss_seg'] + loss_dict['loss_part']
    total_loss.backward()
# Ad-hoc entry point: run the head test directly without invoking pytest.
if __name__ == '__main__':
    test_PointwiseSemanticHead()
import torch
import mmdet3d.ops.spconv as spconv
from mmdet3d.ops import SparseBasicBlock, SparseBasicBlockV0
def test_SparseUNet():
    """Check SparseUNet layer wiring and the forward output shapes."""
    from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet
    unet = SparseUNet(
        in_channels=4, output_shape=[41, 1600, 1408], pre_act=False)

    # Encoder: four stages. Stage 1 starts with a 16->16 submanifold conv
    # followed by BN and ReLU; stage 4 downsamples with a strided sparse
    # conv and still contains submanifold convs afterwards.
    assert len(unet.encoder_layers) == 4
    stage1_conv = unet.encoder_layers.encoder_layer1[0][0]
    assert stage1_conv.in_channels == 16
    assert stage1_conv.out_channels == 16
    assert isinstance(stage1_conv, spconv.conv.SubMConv3d)
    assert isinstance(unet.encoder_layers.encoder_layer1[0][1],
                      torch.nn.modules.batchnorm.BatchNorm1d)
    assert isinstance(unet.encoder_layers.encoder_layer1[0][2],
                      torch.nn.modules.activation.ReLU)
    stage4_conv = unet.encoder_layers.encoder_layer4[0][0]
    assert stage4_conv.in_channels == 64
    assert stage4_conv.out_channels == 64
    assert isinstance(stage4_conv, spconv.conv.SparseConv3d)
    assert isinstance(unet.encoder_layers.encoder_layer4[2][0],
                      spconv.conv.SubMConv3d)

    # Decoder: lateral/merge/upsample layer types.
    assert isinstance(unet.lateral_layer1, SparseBasicBlock)
    assert isinstance(unet.merge_layer1[0], spconv.conv.SubMConv3d)
    assert isinstance(unet.upsample_layer1[0], spconv.conv.SubMConv3d)
    assert isinstance(unet.upsample_layer2[0],
                      spconv.conv.SparseInverseConv3d)

    # Forward: four voxels spread over a batch of two samples.
    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32)  # (n, point_features)
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32)  # (n, 4): batch, ind_x, ind_y, ind_z
    unet_ret_dict = unet.forward(voxel_features, coordinates, 2)
    assert unet_ret_dict['seg_features'].shape == torch.Size([4, 16])
    assert unet_ret_dict['spatial_features'].shape == torch.Size(
        [2, 256, 200, 176])
def test_SparseBasicBlock():
    """Check SparseBasicBlock(V0): conv/bn configuration and output shape."""
    voxel_features = torch.tensor([[6.56126, 0.9648336, -1.7339306, 0.315],
                                   [6.8162713, -2.480431, -1.3616394, 0.36],
                                   [11.643568, -4.744306, -1.3580885, 0.16],
                                   [23.482342, 6.5036807, 0.5806964, 0.35]],
                                  dtype=torch.float32)  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32)  # n, 4(batch, ind_x, ind_y, ind_z)
    # test the legacy v0 block
    self = SparseBasicBlockV0(
        4,
        4,
        indice_key='subm0',
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
                                              [41, 1600, 1408], 2)
    out_features = self(input_sp_tensor)
    assert out_features.features.shape == torch.Size([4, 4])
    # test the current block
    input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
                                              [41, 1600, 1408], 2)
    self = SparseBasicBlock(
        4,
        4,
        conv_cfg=dict(type='SubMConv3d', indice_key='subm1'),
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    # test conv and bn layer
    assert isinstance(self.conv1, spconv.conv.SubMConv3d)
    assert self.conv1.in_channels == 4
    assert self.conv1.out_channels == 4
    assert isinstance(self.conv2, spconv.conv.SubMConv3d)
    # fix: the original asserted conv2.out_channels twice; the first check
    # was clearly meant to cover in_channels.
    assert self.conv2.in_channels == 4
    assert self.conv2.out_channels == 4
    assert self.bn1.eps == 1e-3
    assert self.bn1.momentum == 0.01
    out_features = self(input_sp_tensor)
    assert out_features.features.shape == torch.Size([4, 4])
import argparse import argparse
import os.path as osp import os.path as osp
import tools.data_converter.indoor_converter as indoor
import tools.data_converter.kitti_converter as kitti import tools.data_converter.kitti_converter as kitti
import tools.data_converter.nuscenes_converter as nuscenes_converter import tools.data_converter.nuscenes_converter as nuscenes_converter
import tools.data_converter.scannet_converter as scannet
import tools.data_converter.sunrgbd_converter as sunrgbd
from tools.data_converter.create_gt_database import create_groundtruth_database from tools.data_converter.create_gt_database import create_groundtruth_database
...@@ -46,11 +45,11 @@ def nuscenes_data_prep(root_path, ...@@ -46,11 +45,11 @@ def nuscenes_data_prep(root_path,
def scannet_data_prep(root_path, info_prefix, out_dir): def scannet_data_prep(root_path, info_prefix, out_dir):
scannet.create_scannet_info_file(root_path, info_prefix, out_dir) indoor.create_indoor_info_file(root_path, info_prefix, out_dir)
def sunrgbd_data_prep(root_path, info_prefix, out_dir): def sunrgbd_data_prep(root_path, info_prefix, out_dir):
sunrgbd.create_sunrgbd_info_file(root_path, info_prefix, out_dir) indoor.create_indoor_info_file(root_path, info_prefix, out_dir)
parser = argparse.ArgumentParser(description='Data converter arg parser') parser = argparse.ArgumentParser(description='Data converter arg parser')
......
import os
import mmcv
from tools.data_converter.scannet_data_utils import ScanNetData
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData
def create_indoor_info_file(data_path,
                            pkl_prefix='sunrgbd',
                            save_path=None,
                            use_v1=False):
    """Create indoor information file.

    Collect information of the raw data and dump it as train/val pkl files.

    Args:
        data_path (str): Path of the data.
        pkl_prefix (str): Prefix of the pkl to be saved. Default: 'sunrgbd'.
        save_path (str): Path of the pkl to be saved. Default: None.
        use_v1 (bool): Whether to use v1. Default: False.
    """
    assert os.path.exists(data_path)
    assert pkl_prefix in ['sunrgbd', 'scannet']
    if save_path is None:
        save_path = data_path
    assert os.path.exists(save_path)
    train_filename = os.path.join(save_path, f'{pkl_prefix}_infos_train.pkl')
    val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')

    # SUN RGB-D additionally distinguishes the v1 annotation set.
    if pkl_prefix == 'sunrgbd':
        train_dataset = SUNRGBDData(
            root_path=data_path, split='train', use_v1=use_v1)
        val_dataset = SUNRGBDData(
            root_path=data_path, split='val', use_v1=use_v1)
    else:
        train_dataset = ScanNetData(root_path=data_path, split='train')
        val_dataset = ScanNetData(root_path=data_path, split='val')

    # Dump both splits; the printed message matches the split name.
    for dataset, filename, tag in ((train_dataset, train_filename, 'train'),
                                   (val_dataset, val_filename, 'val')):
        infos = dataset.get_infos(has_label=True)
        mmcv.dump(infos, filename, 'pkl')
        print(f'{pkl_prefix} info {tag} file is saved to {filename}')
import os
import pickle
from pathlib import Path
from tools.data_converter.scannet_data_utils import ScanNetData
def create_scannet_info_file(data_path, pkl_prefix='scannet', save_path=None):
    """Generate ScanNet train/val info pkl files from the raw data.

    Args:
        data_path (str): Path of the raw ScanNet data.
        pkl_prefix (str): Prefix of the pkl to be saved. Default: 'scannet'.
        save_path (str): Directory for the pkl files; falls back to
            ``data_path``. Default: None.
    """
    assert os.path.exists(data_path)
    # Dump next to the raw data unless an explicit destination is given.
    save_path = Path(data_path if save_path is None else save_path)
    assert os.path.exists(save_path)
    train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    train_dataset = ScanNetData(root_path=data_path, split='train')
    val_dataset = ScanNetData(root_path=data_path, split='val')
    train_infos = train_dataset.get_scannet_infos(has_label=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(train_infos, f)
    print('Scannet info train file is saved to %s' % train_filename)
    val_infos = val_dataset.get_scannet_infos(has_label=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(val_infos, f)
    print('Scannet info val file is saved to %s' % val_filename)
# Ad-hoc entry point: build ScanNet info files with the default data layout.
if __name__ == '__main__':
    create_scannet_info_file(
        data_path='./data/scannet', save_path='./data/scannet')
import concurrent.futures as futures
import os import os
import mmcv
import numpy as np import numpy as np
class ScanNetData(object): class ScanNetData(object):
''' Load and parse object data ''' """ScanNet Data
Generate scannet infos for scannet_converter
Args:
root_path (str): Root path of the raw data
split (str): Set split type of the data. Default: 'train'.
"""
def __init__(self, root_path, split='train'): def __init__(self, root_path, split='train'):
self.root_dir = root_path self.root_dir = root_path
...@@ -25,28 +34,37 @@ class ScanNetData(object): ...@@ -25,28 +34,37 @@ class ScanNetData(object):
for i, nyu40id in enumerate(list(self.cat_ids)) for i, nyu40id in enumerate(list(self.cat_ids))
} }
assert split in ['train', 'val', 'test'] assert split in ['train', 'val', 'test']
split_dir = os.path.join(self.root_dir, 'meta_data', split_file = os.path.join(self.root_dir, 'meta_data',
'scannetv2_%s.txt' % split) f'scannetv2_{split}.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines() mmcv.check_file_exist(split_file)
] if os.path.exists(split_dir) else None self.sample_id_list = mmcv.list_from_file(split_file)
def __len__(self): def __len__(self):
return len(self.sample_id_list) return len(self.sample_id_list)
def get_box_label(self, idx): def get_box_label(self, idx):
box_file = os.path.join(self.root_dir, 'scannet_train_instance_data', box_file = os.path.join(self.root_dir, 'scannet_train_instance_data',
'%s_bbox.npy' % idx) f'{idx}_bbox.npy')
assert os.path.exists(box_file) assert os.path.exists(box_file)
return np.load(box_file) return np.load(box_file)
def get_scannet_infos(self, def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
num_workers=4, """Get data infos.
has_label=True,
sample_id_list=None): This method gets information from the raw data.
import concurrent.futures as futures
Args:
num_workers (int): Number of threads to be used. Default: 4.
has_label (bool): Whether the data has label. Default: True.
sample_id_list (List[int]): Index list of the sample.
Default: None.
Returns:
infos (List[dict]): Information of the raw data.
"""
def process_single_scene(sample_idx): def process_single_scene(sample_idx):
print('%s sample_idx: %s' % (self.split, sample_idx)) print(f'{self.split} sample_idx: {sample_idx}')
info = dict() info = dict()
pc_info = {'num_features': 6, 'lidar_idx': sample_idx} pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info info['point_cloud'] = pc_info
......
import os
import pickle
from pathlib import Path
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData
def create_sunrgbd_info_file(data_path,
                             pkl_prefix='sunrgbd',
                             save_path=None,
                             use_v1=False):
    """Generate SUN RGB-D train/val info pkl files from the raw data.

    Args:
        data_path (str): Path of the raw SUN RGB-D data.
        pkl_prefix (str): Prefix of the pkl to be saved. Default: 'sunrgbd'.
        save_path (str): Directory for the pkl files; falls back to
            ``data_path``. Default: None.
        use_v1 (bool): Whether to use v1 annotations. Default: False.
    """
    assert os.path.exists(data_path)
    # Dump next to the raw data unless an explicit destination is given.
    save_path = Path(data_path if save_path is None else save_path)
    assert os.path.exists(save_path)
    train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    train_dataset = SUNRGBDData(
        root_path=data_path, split='train', use_v1=use_v1)
    val_dataset = SUNRGBDData(root_path=data_path, split='val', use_v1=use_v1)
    train_infos = train_dataset.get_sunrgbd_infos(has_label=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(train_infos, f)
    print('Sunrgbd info train file is saved to %s' % train_filename)
    val_infos = val_dataset.get_sunrgbd_infos(has_label=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(val_infos, f)
    print('Sunrgbd info val file is saved to %s' % val_filename)
# Ad-hoc entry point: build SUN RGB-D info files with the default data layout.
if __name__ == '__main__':
    create_sunrgbd_info_file(
        data_path='./data/sunrgbd/sunrgbd_trainval',
        save_path='./data/sunrgbd')
import concurrent.futures as futures
import os import os
import cv2 import mmcv
import numpy as np import numpy as np
import scipy.io as sio import scipy.io as sio
def random_sampling(pc, num_sample, replace=None, return_choices=False): def random_sampling(points, num_points, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC """Random Sampling.
Sampling point cloud to a certain number of points.
Args:
points (ndarray): Point cloud.
num_points (int): The number of samples.
replace (bool): Whether the sample is with or without replacement.
return_choices (bool): Whether to return choices.
Returns:
points (ndarray): Point cloud after sampling.
""" """
if replace is None: if replace is None:
replace = (pc.shape[0] < num_sample) replace = (points.shape[0] < num_points)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace) choices = np.random.choice(points.shape[0], num_points, replace=replace)
if return_choices: if return_choices:
return pc[choices], choices return points[choices], choices
else: else:
return pc[choices] return points[choices]
class SUNRGBDInstance(object): class SUNRGBDInstance(object):
...@@ -44,7 +57,15 @@ class SUNRGBDInstance(object): ...@@ -44,7 +57,15 @@ class SUNRGBDInstance(object):
class SUNRGBDData(object): class SUNRGBDData(object):
''' Load and parse object data ''' """SUNRGBD Data
Generate scannet infos for sunrgbd_converter
Args:
root_path (str): Root path of the raw data.
split (str): Set split type of the data. Default: 'train'.
use_v1 (bool): Whether to use v1. Default: False.
"""
def __init__(self, root_path, split='train', use_v1=False): def __init__(self, root_path, split='train', use_v1=False):
self.root_dir = root_path self.root_dir = root_path
...@@ -60,11 +81,9 @@ class SUNRGBDData(object): ...@@ -60,11 +81,9 @@ class SUNRGBDData(object):
for label in range(len(self.classes)) for label in range(len(self.classes))
} }
assert split in ['train', 'val', 'test'] assert split in ['train', 'val', 'test']
split_dir = os.path.join(self.root_dir, '%s_data_idx.txt' % split) split_file = os.path.join(self.root_dir, f'{split}_data_idx.txt')
self.sample_id_list = [ mmcv.check_file_exist(split_file)
int(x.strip()) for x in open(split_dir).readlines() self.sample_id_list = map(int, mmcv.list_from_file(split_file))
] if os.path.exists(split_dir) else None
self.image_dir = os.path.join(self.split_dir, 'image') self.image_dir = os.path.join(self.split_dir, 'image')
self.calib_dir = os.path.join(self.split_dir, 'calib') self.calib_dir = os.path.join(self.split_dir, 'calib')
self.depth_dir = os.path.join(self.split_dir, 'depth') self.depth_dir = os.path.join(self.split_dir, 'depth')
...@@ -77,20 +96,20 @@ class SUNRGBDData(object): ...@@ -77,20 +96,20 @@ class SUNRGBDData(object):
return len(self.sample_id_list) return len(self.sample_id_list)
def get_image(self, idx): def get_image(self, idx):
img_filename = os.path.join(self.image_dir, '%06d.jpg' % (idx)) img_filename = os.path.join(self.image_dir, f'{idx:06d}.jpg')
return cv2.imread(img_filename) return mmcv.imread(img_filename)
def get_image_shape(self, idx): def get_image_shape(self, idx):
image = self.get_image(idx) image = self.get_image(idx)
return np.array(image.shape[:2], dtype=np.int32) return np.array(image.shape[:2], dtype=np.int32)
def get_depth(self, idx): def get_depth(self, idx):
depth_filename = os.path.join(self.depth_dir, '%06d.mat' % (idx)) depth_filename = os.path.join(self.depth_dir, f'{idx:06d}.mat')
depth = sio.loadmat(depth_filename)['instance'] depth = sio.loadmat(depth_filename)['instance']
return depth return depth
def get_calibration(self, idx): def get_calibration(self, idx):
calib_filepath = os.path.join(self.calib_dir, '%06d.txt' % (idx)) calib_filepath = os.path.join(self.calib_dir, f'{idx:06d}.txt')
lines = [line.rstrip() for line in open(calib_filepath)] lines = [line.rstrip() for line in open(calib_filepath)]
Rt = np.array([float(x) for x in lines[0].split(' ')]) Rt = np.array([float(x) for x in lines[0].split(' ')])
Rt = np.reshape(Rt, (3, 3), order='F') Rt = np.reshape(Rt, (3, 3), order='F')
...@@ -98,23 +117,33 @@ class SUNRGBDData(object): ...@@ -98,23 +117,33 @@ class SUNRGBDData(object):
return K, Rt return K, Rt
def get_label_objects(self, idx): def get_label_objects(self, idx):
label_filename = os.path.join(self.label_dir, '%06d.txt' % (idx)) label_filename = os.path.join(self.label_dir, f'{idx:06d}.txt')
lines = [line.rstrip() for line in open(label_filename)] lines = [line.rstrip() for line in open(label_filename)]
objects = [SUNRGBDInstance(line) for line in lines] objects = [SUNRGBDInstance(line) for line in lines]
return objects return objects
def get_sunrgbd_infos(self, def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
num_workers=4, """Get data infos.
has_label=True,
sample_id_list=None): This method gets information from the raw data.
import concurrent.futures as futures
Args:
num_workers (int): Number of threads to be used. Default: 4.
has_label (bool): Whether the data has label. Default: True.
sample_id_list (List[int]): Index list of the sample.
Default: None.
Returns:
infos (List[dict]): Information of the raw data.
"""
def process_single_scene(sample_idx): def process_single_scene(sample_idx):
print('%s sample_idx: %s' % (self.split, sample_idx)) print(f'{self.split} sample_idx: {sample_idx}')
# convert depth to points # convert depth to points
SAMPLE_NUM = 50000 SAMPLE_NUM = 50000
# TODO: Check whether can move the point
# sampling process during training.
pc_upright_depth = self.get_depth(sample_idx) pc_upright_depth = self.get_depth(sample_idx)
# TODO : sample points in loading process and test
pc_upright_depth_subsampled = random_sampling( pc_upright_depth_subsampled = random_sampling(
pc_upright_depth, SAMPLE_NUM) pc_upright_depth, SAMPLE_NUM)
np.save( np.save(
...@@ -124,7 +153,7 @@ class SUNRGBDData(object): ...@@ -124,7 +153,7 @@ class SUNRGBDData(object):
info = dict() info = dict()
pc_info = {'num_features': 6, 'lidar_idx': sample_idx} pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info info['point_cloud'] = pc_info
img_name = os.path.join(self.image_dir, '%06d.jpg' % (sample_idx)) img_name = os.path.join(self.image_dir, f'{sample_idx:06d}')
img_path = os.path.join(self.image_dir, img_name) img_path = os.path.join(self.image_dir, img_name)
image_info = { image_info = {
'image_idx': sample_idx, 'image_idx': sample_idx,
...@@ -183,8 +212,7 @@ class SUNRGBDData(object): ...@@ -183,8 +212,7 @@ class SUNRGBDData(object):
return info return info
lidar_save_dir = os.path.join(self.root_dir, 'lidar') lidar_save_dir = os.path.join(self.root_dir, 'lidar')
if not os.path.exists(lidar_save_dir): mmcv.mkdir_or_exist(lidar_save_dir)
os.mkdir(lidar_save_dir)
sample_id_list = sample_id_list if \ sample_id_list = sample_id_list if \
sample_id_list is not None else self.sample_id_list sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor: with futures.ThreadPoolExecutor(num_workers) as executor:
......
#!/usr/bin/env bash #!/usr/bin/env bash
set -x set -x
export PYTHONPATH=`pwd`:$PYTHONPATH
PARTITION=$1 PARTITION=$1
JOB_NAME=$2 JOB_NAME=$2
...@@ -20,4 +19,4 @@ srun -p ${PARTITION} \ ...@@ -20,4 +19,4 @@ srun -p ${PARTITION} \
--ntasks-per-node=${GPUS_PER_NODE} \ --ntasks-per-node=${GPUS_PER_NODE} \
--kill-on-bad-exit=1 \ --kill-on-bad-exit=1 \
${SRUN_ARGS} \ ${SRUN_ARGS} \
python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
from __future__ import division from __future__ import division
import argparse import argparse
import copy import copy
import logging
import os import os
import os.path as osp import os.path as osp
import time import time
...@@ -11,10 +12,11 @@ from mmcv import Config ...@@ -11,10 +12,11 @@ from mmcv import Config
from mmcv.runner import init_dist from mmcv.runner import init_dist
from mmdet3d import __version__ from mmdet3d import __version__
from mmdet3d.apis import train_detector
from mmdet3d.datasets import build_dataset from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_detector from mmdet3d.models import build_detector
from mmdet3d.utils import collect_env from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import get_root_logger, set_random_seed, train_detector from mmdet.apis import set_random_seed
def parse_args(): def parse_args():
...@@ -27,12 +29,18 @@ def parse_args(): ...@@ -27,12 +29,18 @@ def parse_args():
'--validate', '--validate',
action='store_true', action='store_true',
help='whether to evaluate the checkpoint during training') help='whether to evaluate the checkpoint during training')
parser.add_argument( group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus', '--gpus',
type=int, type=int,
default=1,
help='number of gpus to use ' help='number of gpus to use '
'(only applicable to non-distributed training)') '(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=0, help='random seed') parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument( parser.add_argument(
'--deterministic', '--deterministic',
...@@ -73,11 +81,14 @@ def main(): ...@@ -73,11 +81,14 @@ def main():
osp.splitext(osp.basename(args.config))[0]) osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None: if args.resume_from is not None:
cfg.resume_from = args.resume_from cfg.resume_from = args.resume_from
cfg.gpus = args.gpus if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
if args.autoscale_lr: if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677) # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8 cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info. # init distributed env first, since logger depends on the dist info.
if args.launcher == 'none': if args.launcher == 'none':
...@@ -93,6 +104,10 @@ def main(): ...@@ -93,6 +104,10 @@ def main():
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp)) log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# add a logging filter
logging_filter = logging.Filter('mmdet')
logging_filter.filter = lambda record: record.find('mmdet') != -1
# init the meta dict to record some important information such as # init the meta dict to record some important information such as
# environment info and seed, which will be logged # environment info and seed, which will be logged
meta = dict() meta = dict()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment