Unverified Commit 22b4bb4e authored by Ziyi Wu's avatar Ziyi Wu Committed by GitHub
Browse files

[Fix] Fix small errors in seg dataset and cfgs (#487)

* fix small errors in seg dataset cfgs

* add box field to 3d seg dataset

* fix small bug in S3DIS data generation scripts

* fix small bugs in scannet data generation
parent 25a1bcae
...@@ -44,8 +44,30 @@ test_pipeline = [ ...@@ -44,8 +44,30 @@ test_pipeline = [
load_dim=6, load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]), use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='NormalizePointsColor', color_mean=None), dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points']) # a wrapper in order to successfully call test function
# actually we don't perform test-time-aug
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.0,
flip_ratio_bev_vertical=0.0),
dict(
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
...@@ -94,22 +116,24 @@ data = dict( ...@@ -94,22 +116,24 @@ data = dict(
data_root + f'seg_info/Area_{i}_resampled_scene_idxs.npy' data_root + f'seg_info/Area_{i}_resampled_scene_idxs.npy'
for i in train_area for i in train_area
], ],
label_weight=[ label_weights=[
data_root + f'seg_info/Area_{i}_label_weight.npy' data_root + f'seg_info/Area_{i}_label_weight.npy'
for i in train_area for i in train_area
]), ]),
val=dict( val=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl',
pipeline=test_pipeline, pipeline=test_pipeline,
classes=class_names, classes=class_names,
test_mode=True, test_mode=True,
ignore_index=len(class_names)), ignore_index=len(class_names),
scene_idxs=data_root +
f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'),
test=dict( test=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl',
pipeline=test_pipeline, pipeline=test_pipeline,
classes=class_names, classes=class_names,
test_mode=True, test_mode=True,
......
...@@ -45,8 +45,30 @@ test_pipeline = [ ...@@ -45,8 +45,30 @@ test_pipeline = [
load_dim=6, load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]), use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='NormalizePointsColor', color_mean=None), dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points']) # a wrapper in order to successfully call test function
# actually we don't perform test-time-aug
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.0,
flip_ratio_bev_vertical=0.0),
dict(
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
......
import argparse import argparse
import mmcv import mmcv
from indoor3d_util import export
from os import path as osp from os import path as osp
from .indoor3d_util import export
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--output-folder', '--output-folder',
......
...@@ -52,6 +52,7 @@ def export_one_scan(scan_name, ...@@ -52,6 +52,7 @@ def export_one_scan(scan_name,
print(f'Num of care instances: {instance_bboxes.shape[0]}') print(f'Num of care instances: {instance_bboxes.shape[0]}')
if max_num_point is not None: if max_num_point is not None:
max_num_point = int(max_num_point)
N = mesh_vertices.shape[0] N = mesh_vertices.shape[0]
if N > max_num_point: if N > max_num_point:
choices = np.random.choice(N, max_num_point, replace=False) choices = np.random.choice(N, max_num_point, replace=False)
......
...@@ -58,7 +58,7 @@ def export(mesh_file, ...@@ -58,7 +58,7 @@ def export(mesh_file,
meta_file, meta_file,
label_map_file, label_map_file,
output_file=None, output_file=None,
is_train=True): test_mode=False):
"""Export original files to vert, ins_label, sem_label and bbox file. """Export original files to vert, ins_label, sem_label and bbox file.
Args: Args:
...@@ -69,8 +69,8 @@ def export(mesh_file, ...@@ -69,8 +69,8 @@ def export(mesh_file,
label_map_file (str): Path of the label_map_file. label_map_file (str): Path of the label_map_file.
output_file (str): Path of the output folder. output_file (str): Path of the output folder.
Default: None. Default: None.
is_train (bool): Whether to generate training data with labels. test_mode (bool): Whether to generate training data without labels.
Default: True. Default: False.
It returns a tuple, which contains the following things: It returns a tuple, which contains the following things:
np.ndarray: Vertices of points data. np.ndarray: Vertices of points data.
...@@ -103,7 +103,7 @@ def export(mesh_file, ...@@ -103,7 +103,7 @@ def export(mesh_file,
mesh_vertices[:, 0:3] = pts[:, 0:3] mesh_vertices[:, 0:3] = pts[:, 0:3]
# Load semantic and instance labels # Load semantic and instance labels
if is_train: if not test_mode:
object_id_to_segs, label_to_segs = read_aggregation(agg_file) object_id_to_segs, label_to_segs = read_aggregation(agg_file)
seg_to_verts, num_verts = read_segmentation(seg_file) seg_to_verts, num_verts = read_segmentation(seg_file)
label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) label_ids = np.zeros(shape=(num_verts), dtype=np.uint32)
...@@ -147,7 +147,7 @@ def export(mesh_file, ...@@ -147,7 +147,7 @@ def export(mesh_file,
if output_file is not None: if output_file is not None:
np.save(output_file + '_vert.npy', mesh_vertices) np.save(output_file + '_vert.npy', mesh_vertices)
if is_train: if not test_mode:
np.save(output_file + '_sem_label.npy', label_ids) np.save(output_file + '_sem_label.npy', label_ids)
np.save(output_file + '_ins_label.npy', instance_ids) np.save(output_file + '_ins_label.npy', instance_ids)
np.save(output_file + '_bbox.npy', instance_bboxes) np.save(output_file + '_bbox.npy', instance_bboxes)
......
...@@ -143,6 +143,7 @@ class Custom3DSegDataset(Dataset): ...@@ -143,6 +143,7 @@ class Custom3DSegDataset(Dataset):
results['pts_seg_fields'] = [] results['pts_seg_fields'] = []
results['mask_fields'] = [] results['mask_fields'] = []
results['seg_fields'] = [] results['seg_fields'] = []
results['gt_bboxes_3d'] = []
def prepare_train_data(self, index): def prepare_train_data(self, index):
"""Training data preparation. """Training data preparation.
......
...@@ -8,6 +8,6 @@ line_length = 79 ...@@ -8,6 +8,6 @@ line_length = 79
multi_line_output = 0 multi_line_output = 0
known_standard_library = setuptools known_standard_library = setuptools
known_first_party = mmdet,mmdet3d known_first_party = mmdet,mmdet3d
known_third_party = cv2,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset known_third_party = cv2,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset
no_lines_before = STDLIB,LOCALFOLDER no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY default_section = THIRDPARTY
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment