Commit 8b3b1104 authored by liyinhao's avatar liyinhao
Browse files

Finish `__getitem__` and unit test

parent 7a43a52a
......@@ -11,6 +11,8 @@ from .pipelines import (GlobalRotScale, IndoorFlipData, IndoorGlobalRotScale,
IndoorPointsColorNormalize, ObjectNoise,
ObjectRangeFilter, ObjectSample, PointShuffle,
PointsRangeFilter, RandomFlip3D)
from .scannet_dataset import ScannetDataset
from .sunrgbd_dataset import SunrgbdDataset
__all__ = [
'KittiDataset', 'GroupSampler', 'DistributedGroupSampler',
......@@ -20,5 +22,6 @@ __all__ = [
'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D',
'IndoorLoadPointsFromFile', 'IndoorPointsColorNormalize',
'IndoorPointSample', 'IndoorLoadAnnotations3D', 'IndoorPointsColorJitter',
'IndoorGlobalRotScale', 'IndoorFlipData'
'IndoorGlobalRotScale', 'IndoorFlipData', 'SunrgbdDataset',
'ScannetDataset'
]
......@@ -69,12 +69,12 @@ class ScannetDataset(torch_data.Dataset):
self.root_path = root_path
self.class_names = class_names if class_names else self.CLASSES
self.data_path = os.path.join(root_path, 'scannet_train_instance_data')
self.data_path = osp.join(root_path, 'scannet_train_instance_data')
self.test_mode = test_mode
self.training = training
self.mode = 'TRAIN' if self.training else 'TEST'
self.ann_file = ann_file
mmcv.check_file_exist(ann_file)
self.scannet_infos = mmcv.load(ann_file)
# dataset config
......
import copy
import os
import os.path as osp
import mmcv
import numpy as np
import torch.utils.data as torch_data
from mmdet.datasets import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class SunrgbdDataset(torch_data.Dataset):
    """SUN RGB-D dataset for indoor 3D object detection.

    Reads the pickled annotation info file produced by the data-preparation
    scripts, loads per-sample point clouds from
    ``<root_path>/sunrgbd_trainval/lidar`` and runs each sample through the
    configured processing pipeline.

    Args:
        root_path (str): Root directory of the dataset.
        ann_file (str): Path to the pickled annotation info file.
        pipeline (list[dict], optional): Configs of the processing pipeline.
        training (bool): Whether the dataset is used for training.
        class_names (tuple[str], optional): Classes used by the dataset;
            defaults to ``CLASSES``.
        test_mode (bool): If True, ``__getitem__`` returns test samples
            without ground-truth filtering.
        with_label (bool): Whether to attach annotations to each sample.
    """

    # Class name -> contiguous label id.
    type2class = {
        'bed': 0,
        'table': 1,
        'sofa': 2,
        'chair': 3,
        'toilet': 4,
        'desk': 5,
        'dresser': 6,
        'night_stand': 7,
        'bookshelf': 8,
        'bathtub': 9
    }
    # Inverse mapping, derived from type2class so the two can never drift.
    class2type = {label: name for name, label in type2class.items()}
    CLASSES = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
               'night_stand', 'bookshelf', 'bathtub')

    def __init__(self,
                 root_path,
                 ann_file,
                 pipeline=None,
                 training=False,
                 class_names=None,
                 test_mode=False,
                 with_label=True):
        super().__init__()
        self.root_path = root_path
        self.class_names = class_names if class_names else self.CLASSES
        # Use osp.join consistently (the module imports os.path as osp;
        # the sibling ScannetDataset was changed the same way).
        self.data_path = osp.join(root_path, 'sunrgbd_trainval')
        self.test_mode = test_mode
        self.training = training
        self.mode = 'TRAIN' if self.training else 'TEST'
        mmcv.check_file_exist(ann_file)
        self.sunrgbd_infos = mmcv.load(ann_file)
        # dataset config
        self.num_class = len(self.class_names)
        self.pcd_limit_range = [0, -40, -3.0, 70.4, 40, 3.0]
        if pipeline is not None:
            self.pipeline = Compose(pipeline)
        self.with_label = with_label

    def __getitem__(self, idx):
        """Return the processed sample at ``idx``.

        In training mode, samples whose ground truth is filtered away are
        skipped and a random replacement index is drawn instead.
        """
        if self.test_mode:
            return self._prepare_test_data(idx)
        while True:
            data = self._prepare_train_data(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def _prepare_test_data(self, index):
        """Run the pipeline on a test sample (no ground-truth filtering)."""
        input_dict = self._get_sensor_data(index)
        example = self.pipeline(input_dict)
        return example

    def _prepare_train_data(self, index):
        """Build a training sample; return None when it has no gt boxes."""
        input_dict = self._get_sensor_data(index)
        input_dict = self._train_pre_pipeline(input_dict)
        if input_dict is None:
            return None
        example = self.pipeline(input_dict)
        # The pipeline may filter out all boxes (e.g. range filtering).
        if len(example['gt_bboxes_3d']._data) == 0:
            return None
        return example

    def _train_pre_pipeline(self, input_dict):
        """Drop samples that carry no gt boxes before running the pipeline."""
        if len(input_dict['gt_bboxes_3d']) == 0:
            return None
        return input_dict

    def _get_sensor_data(self, index):
        """Assemble the raw input dict (points path plus optional annos)."""
        info = self.sunrgbd_infos[index]
        sample_idx = info['point_cloud']['lidar_idx']
        pts_filename = self._get_pts_filename(sample_idx)
        input_dict = dict(pts_filename=pts_filename)
        if self.with_label:
            annos = self._get_ann_info(index, sample_idx)
            input_dict.update(annos)
        return input_dict

    def _get_pts_filename(self, sample_idx):
        """Return the point-cloud file path for ``sample_idx``; must exist."""
        pts_filename = osp.join(self.data_path, 'lidar',
                                f'{sample_idx:06d}.npy')
        mmcv.check_file_exist(pts_filename)
        return pts_filename

    def _get_ann_info(self, index, sample_idx):
        """Return gt boxes, labels and a validity mask for sample ``index``.

        A sample without annotations gets a single zero placeholder box with
        an all-False mask so downstream code can rely on a fixed structure.
        """
        # Use index to get the annos, thus the evalhook could also use this api
        info = self.sunrgbd_infos[index]
        if info['annos']['gt_num'] != 0:
            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']  # k, 6
            gt_labels = info['annos']['class']
            # np.bool is a removed NumPy alias; use the builtin bool dtype.
            gt_bboxes_3d_mask = np.ones_like(gt_labels, dtype=bool)
        else:
            gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
            # NOTE(review): a bool placeholder label mirrors the original
            # code; presumably masked out downstream — confirm.
            gt_labels = np.zeros(1, dtype=bool)
            gt_bboxes_3d_mask = np.zeros(1, dtype=bool)
        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels=gt_labels,
            gt_bboxes_3d_mask=gt_bboxes_3d_mask)
        return anns_results

    def _rand_another(self, idx):
        """Pick a random index from the same group as ``idx``.

        NOTE(review): relies on ``self.flag``, which is never assigned in
        this class — presumably set by a sampler/wrapper; confirm.
        """
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def _generate_annotations(self, output):
        """Convert network predictions to the AP-calculator format.

        Args:
            output (list[dict]): One dict per sample with keys
                ``box3d_lidar``, ``label_preds`` and ``scores``.

        Returns:
            list[list[tuple]]: For each sample, a list of
            ``(pred_label, box_params, score)`` tuples, one per proposal;
            empty when ``box3d_lidar`` is None.
        """
        result = []
        for pred_boxes in output:
            pred_list_i = []
            box3d_depth = pred_boxes['box3d_lidar']
            if box3d_depth is not None:
                scores = pred_boxes['scores'].detach().cpu().numpy()
                label_preds = pred_boxes['label_preds'].detach().cpu().numpy()
                for j in range(box3d_depth.shape[0]):
                    # NOTE(review): .copy() assumes box3d_depth is a numpy
                    # array; a torch tensor would need .clone() — confirm.
                    bbox_lidar_bottom = box3d_depth[j].copy()
                    pred_list_i.append(
                        (label_preds[j], bbox_lidar_bottom, scores[j]))
            result.append(pred_list_i)
        return result

    def _format_results(self, outputs):
        """Convert a list of network outputs into AP-calculator annos."""
        return [self._generate_annotations(output) for output in outputs]

    def evaluate(self, results, metric=None):
        """Evaluate predictions with the indoor AP metric.

        Args:
            results (list): Raw network outputs, one entry per batch.
            metric (dict): Must contain the key ``'AP_IOU_THRESHHOLDS'``.

        Returns:
            dict: AP results produced by ``indoor_eval``.
        """
        from mmdet3d.core.evaluation import indoor_eval
        results = self._format_results(results)
        # Fail early with a clear assertion instead of a TypeError when the
        # caller forgets to pass a metric dict.
        assert metric is not None and 'AP_IOU_THRESHHOLDS' in metric
        gt_annos = [
            copy.deepcopy(info['annos']) for info in self.sunrgbd_infos
        ]
        ap_result_str, ap_dict = indoor_eval(gt_annos, results, metric,
                                             self.class2type)
        return ap_dict

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.sunrgbd_infos)
import numpy as np
from mmdet3d.datasets import SunrgbdDataset
def test_getitem():
    """Smoke-test SunrgbdDataset.__getitem__ against fixed expected values."""
    np.random.seed(0)
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    pipeline_cfg = [
        dict(
            type='IndoorLoadPointsFromFile',
            use_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='IndoorFlipData', flip_ratio_yz=1.0),
        dict(
            type='IndoorGlobalRotScale',
            use_height=True,
            rot_range=[-np.pi / 6, np.pi / 6],
            scale_range=[0.85, 1.15]),
        dict(type='IndoorPointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels']),
    ]
    dataset = SunrgbdDataset('./tests/data/sunrgbd',
                             './tests/data/sunrgbd/sunrgbd_infos.pkl',
                             pipeline_cfg, True)

    sample = dataset[0]

    expected_points = np.array(
        [[0.6570105, 1.5538014, 0.24514851, 1.0165423],
         [0.656101, 1.558591, 0.21755838, 0.98895216],
         [0.6293659, 1.5679953, -0.10004003, 0.67135376],
         [0.6068739, 1.5974995, -0.41063973, 0.36075398],
         [0.6464709, 1.5573514, 0.15114647, 0.9225402]])
    expected_gt_bboxes_3d = np.array(
        [[
            -2.012483, 3.9473376, -0.25446942, 2.3730404, 1.9457763,
            2.0303352, 1.2205974
        ],
         [
             -3.7036808, 4.2396426, -0.81091917, 0.6032123, 0.91040343,
             1.003341, 1.2662518
         ],
         [
             0.6528646, 2.1638472, -0.15228128, 0.7347852, 1.6113238,
             2.1694272, 2.81404
         ]])
    expected_gt_labels = np.array([0, 7, 6])

    assert np.allclose(sample['points']._data, expected_points)
    assert np.allclose(sample['gt_bboxes_3d']._data, expected_gt_bboxes_3d)
    assert np.all(sample['gt_labels']._data.numpy() == expected_gt_labels)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment