OpenDAS / mmdetection3d — Commit 4f1a5e52
Authored May 08, 2020 by liyinhao
Merge branch 'master_temp' into indoor_augment
Parents: c2c0f3d8, f584b970

Changes: 11 changed files, with 335 additions and 162 deletions.
tests/test_roiaware_pool3d.py               +10  -42
tests/test_semantic_heads.py                +75   -0
tests/test_sparse_unet.py                   +94   -0
tools/create_data.py                         +3   -4
tools/data_converter/indoor_converter.py    +44   -0
tools/data_converter/scannet_converter.py    +0  -31
tools/data_converter/scannet_data_utils.py  +30  -12
tools/data_converter/sunrgbd_converter.py    +0  -36
tools/data_converter/sunrgbd_data_utils.py  +57  -29
tools/slurm_train.sh                         +1   -2
tools/train.py                              +21   -6
tests/test_roiaware_pool3d.py  (+10 -42)

@@ -6,8 +6,8 @@ from mmdet3d.ops.roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_cpu,
 def test_RoIAwarePool3d():
-    if not torch.cuda.is_available():
-        # RoIAwarePool3d only support gpu version currently.
+    # RoIAwarePool3d only support gpu version currently.
+    if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')
     roiaware_pool3d_max = RoIAwarePool3d(
         out_size=4, max_pts_per_voxel=128, mode='max')

@@ -19,23 +19,10 @@ def test_RoIAwarePool3d():
         dtype=torch.float32).cuda(
         )  # boxes (m, 7) with bottom center in lidar coordinate
-    pts = torch.tensor([
-        [1, 2, 3.3],
-        [1.2, 2.5, 3.0],
-        [0.8, 2.1, 3.5],
-        [1.6, 2.6, 3.6],
-        [0.8, 1.2, 3.9],
-        [-9.2, 21.0, 18.2],
-        [3.8, 7.9, 6.3],
-        [4.7, 3.5, -12.2],
-        [3.8, 7.6, -2],
-        [-10.6, -12.9, -20],
-        [-16, -18, 9],
-        [-21.3, -52, -5],
-        [0, 0, 0],
-        [6, 7, 8],
-        [-2, -3, -4],
-    ],
-                       dtype=torch.float32).cuda()
+    pts = torch.tensor(
+        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
+         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
+         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
+         [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
+        dtype=torch.float32).cuda()  # points (n, 3) in lidar coordinate
     pts_feature = pts.clone()

@@ -83,23 +70,10 @@ def test_points_in_boxes_cpu():
         dtype=torch.float32
     )  # boxes (m, 7) with bottom center in lidar coordinate
-    pts = torch.tensor([
-        [1, 2, 3.3],
-        [1.2, 2.5, 3.0],
-        [0.8, 2.1, 3.5],
-        [1.6, 2.6, 3.6],
-        [0.8, 1.2, 3.9],
-        [-9.2, 21.0, 18.2],
-        [3.8, 7.9, 6.3],
-        [4.7, 3.5, -12.2],
-        [3.8, 7.6, -2],
-        [-10.6, -12.9, -20],
-        [-16, -18, 9],
-        [-21.3, -52, -5],
-        [0, 0, 0],
-        [6, 7, 8],
-        [-2, -3, -4],
-    ],
-                       dtype=torch.float32)
+    pts = torch.tensor(
+        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
+         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
+         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
+         [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
+        dtype=torch.float32)  # points (n, 3) in lidar coordinate
     point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)

@@ -109,9 +83,3 @@ def test_points_in_boxes_cpu():
         dtype=torch.int32)
     assert point_indices.shape == torch.Size([2, 15])
     assert (point_indices == expected_point_indices).all()
-
-
-if __name__ == '__main__':
-    test_points_in_boxes_cpu()
-    test_points_in_boxes_gpu()
-    test_RoIAwarePool3d()
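The CUDA gate these tests share is easy to lift out on its own. A minimal standalone sketch (the test name is hypothetical; the gate itself is verbatim from the diff above): calling pytest.skip inside the test body marks the test as skipped at runtime instead of failing on machines without CUDA.

import pytest
import torch


def test_needs_cuda():  # hypothetical test, same gate as in the diff above
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    # only reached on CUDA machines
    assert torch.zeros(1).cuda().is_cuda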
tests/test_semantic_heads.py  (new file, +75)

import pytest
import torch


def test_PointwiseSemanticHead():
    # PointwiseSemanticHead only support gpu version currently.
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    from mmdet3d.models.builder import build_head
    head_cfg = dict(
        type='PointwiseSemanticHead',
        in_channels=8,
        extra_width=0.2,
        seg_score_thr=0.3,
        num_classes=3,
        loss_seg=dict(
            type='FocalLoss',
            use_sigmoid=True,
            reduction='sum',
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_part=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    self = build_head(head_cfg)
    self.cuda()

    # test forward
    voxel_features = torch.rand([4, 8], dtype=torch.float32).cuda()
    feats_dict = self.forward(voxel_features)
    assert feats_dict['seg_preds'].shape == torch.Size(
        [voxel_features.shape[0], 1])
    assert feats_dict['part_preds'].shape == torch.Size(
        [voxel_features.shape[0], 3])
    assert feats_dict['part_feats'].shape == torch.Size(
        [voxel_features.shape[0], 4])

    voxel_centers = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306],
         [6.8162713, -2.480431, -1.3616394],
         [11.643568, -4.744306, -1.3580885],
         [23.482342, 6.5036807, 0.5806964]],
        dtype=torch.float32).cuda()  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32).cuda()  # n, 4(batch, ind_x, ind_y, ind_z)
    voxel_dict = dict(voxel_centers=voxel_centers, coors=coordinates)
    gt_bboxes = list(
        torch.tensor(
            [[[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
             [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]]],
            dtype=torch.float32).cuda())
    gt_labels = list(torch.tensor([[0], [1]], dtype=torch.int64).cuda())

    # test get_targets
    target_dict = self.get_targets(voxel_dict, gt_bboxes, gt_labels)
    assert target_dict['seg_targets'].shape == torch.Size(
        [voxel_features.shape[0]])
    assert target_dict['part_targets'].shape == torch.Size(
        [voxel_features.shape[0], 3])

    # test loss
    loss_dict = self.loss(feats_dict['seg_preds'], feats_dict['part_preds'],
                          target_dict['seg_targets'],
                          target_dict['part_targets'])
    assert loss_dict['loss_seg'] > 0
    assert loss_dict['loss_part'] == 0  # no points in gt_boxes
    total_loss = loss_dict['loss_seg'] + loss_dict['loss_part']
    total_loss.backward()


if __name__ == '__main__':
    test_PointwiseSemanticHead()
tests/test_sparse_unet.py  (new file, +94)

import torch

import mmdet3d.ops.spconv as spconv
from mmdet3d.ops import SparseBasicBlock, SparseBasicBlockV0


def test_SparseUNet():
    from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet
    self = SparseUNet(
        in_channels=4, output_shape=[41, 1600, 1408], pre_act=False)

    # test encoder layers
    assert len(self.encoder_layers) == 4
    assert self.encoder_layers.encoder_layer1[0][0].in_channels == 16
    assert self.encoder_layers.encoder_layer1[0][0].out_channels == 16
    assert isinstance(self.encoder_layers.encoder_layer1[0][0],
                      spconv.conv.SubMConv3d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][1],
                      torch.nn.modules.batchnorm.BatchNorm1d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][2],
                      torch.nn.modules.activation.ReLU)
    assert self.encoder_layers.encoder_layer4[0][0].in_channels == 64
    assert self.encoder_layers.encoder_layer4[0][0].out_channels == 64
    assert isinstance(self.encoder_layers.encoder_layer4[0][0],
                      spconv.conv.SparseConv3d)
    assert isinstance(self.encoder_layers.encoder_layer4[2][0],
                      spconv.conv.SubMConv3d)

    # test decoder layers
    assert isinstance(self.lateral_layer1, SparseBasicBlock)
    assert isinstance(self.merge_layer1[0], spconv.conv.SubMConv3d)
    assert isinstance(self.upsample_layer1[0], spconv.conv.SubMConv3d)
    assert isinstance(self.upsample_layer2[0],
                      spconv.conv.SparseInverseConv3d)

    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32)  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32)  # n, 4(batch, ind_x, ind_y, ind_z)

    unet_ret_dict = self.forward(voxel_features, coordinates, 2)
    seg_features = unet_ret_dict['seg_features']
    spatial_features = unet_ret_dict['spatial_features']
    assert seg_features.shape == torch.Size([4, 16])
    assert spatial_features.shape == torch.Size([2, 256, 200, 176])


def test_SparseBasicBlock():
    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32)  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32)  # n, 4(batch, ind_x, ind_y, ind_z)

    # test v0
    self = SparseBasicBlockV0(
        4,
        4,
        indice_key='subm0',
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
                                              [41, 1600, 1408], 2)
    out_features = self(input_sp_tensor)
    assert out_features.features.shape == torch.Size([4, 4])

    # test
    input_sp_tensor = spconv.SparseConvTensor(voxel_features, coordinates,
                                              [41, 1600, 1408], 2)
    self = SparseBasicBlock(
        4,
        4,
        conv_cfg=dict(type='SubMConv3d', indice_key='subm1'),
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    # test conv and bn layer
    assert isinstance(self.conv1, spconv.conv.SubMConv3d)
    assert self.conv1.in_channels == 4
    assert self.conv1.out_channels == 4
    assert isinstance(self.conv2, spconv.conv.SubMConv3d)
    assert self.conv2.out_channels == 4
    assert self.conv2.out_channels == 4
    assert self.bn1.eps == 1e-3
    assert self.bn1.momentum == 0.01
    out_features = self(input_sp_tensor)
    assert out_features.features.shape == torch.Size([4, 4])
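For readers new to spconv, a small sketch of the sparse tensor both tests build, using the same (features, coordinates, spatial_shape, batch_size) call as above; the toy feature values and voxel indices here are invented.

import torch

import mmdet3d.ops.spconv as spconv

features = torch.rand(4, 4)  # n, point_features (invented values)
coordinates = torch.tensor(
    [[0, 1, 2, 3], [0, 4, 5, 6], [1, 7, 8, 9], [1, 10, 11, 12]],
    dtype=torch.int32)  # n, 4(batch, ind_x, ind_y, ind_z)
x = spconv.SparseConvTensor(features, coordinates, [41, 1600, 1408], 2)
assert x.features.shape == torch.Size([4, 4])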
tools/create_data.py  (+3 -4)

 import argparse
 import os.path as osp

+import tools.data_converter.indoor_converter as indoor
 import tools.data_converter.kitti_converter as kitti
 import tools.data_converter.nuscenes_converter as nuscenes_converter
-import tools.data_converter.scannet_converter as scannet
-import tools.data_converter.sunrgbd_converter as sunrgbd
 from tools.data_converter.create_gt_database import create_groundtruth_database

@@ -46,11 +45,11 @@ def nuscenes_data_prep(root_path,
 def scannet_data_prep(root_path, info_prefix, out_dir):
-    scannet.create_scannet_info_file(root_path, info_prefix, out_dir)
+    indoor.create_indoor_info_file(root_path, info_prefix, out_dir)


 def sunrgbd_data_prep(root_path, info_prefix, out_dir):
-    sunrgbd.create_sunrgbd_info_file(root_path, info_prefix, out_dir)
+    indoor.create_indoor_info_file(root_path, info_prefix, out_dir)


 parser = argparse.ArgumentParser(description='Data converter arg parser')
tools/data_converter/indoor_converter.py  (new file, +44)

import os

import mmcv

from tools.data_converter.scannet_data_utils import ScanNetData
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData


def create_indoor_info_file(data_path,
                            pkl_prefix='sunrgbd',
                            save_path=None,
                            use_v1=False):
    """Create indoor information file.

    Get information of the raw data and save it to the pkl file.

    Args:
        data_path (str): Path of the data.
        pkl_prefix (str): Prefix of the pkl to be saved. Default: 'sunrgbd'.
        save_path (str): Path of the pkl to be saved. Default: None.
        use_v1 (bool): Whether to use v1. Default: False.
    """
    assert os.path.exists(data_path)
    assert pkl_prefix in ['sunrgbd', 'scannet']
    save_path = data_path if save_path is None else save_path
    assert os.path.exists(save_path)
    train_filename = os.path.join(save_path, f'{pkl_prefix}_infos_train.pkl')
    val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
    if pkl_prefix == 'sunrgbd':
        train_dataset = SUNRGBDData(
            root_path=data_path, split='train', use_v1=use_v1)
        val_dataset = SUNRGBDData(
            root_path=data_path, split='val', use_v1=use_v1)
    else:
        train_dataset = ScanNetData(root_path=data_path, split='train')
        val_dataset = ScanNetData(root_path=data_path, split='val')
    infos_train = train_dataset.get_infos(has_label=True)
    mmcv.dump(infos_train, train_filename, 'pkl')
    print(f'{pkl_prefix} info train file is saved to {train_filename}')
    infos_val = val_dataset.get_infos(has_label=True)
    mmcv.dump(infos_val, val_filename, 'pkl')
    print(f'{pkl_prefix} info val file is saved to {val_filename}')
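Since this file now subsumes the two per-dataset converters deleted below, a short usage sketch, with data paths taken from the deleted files' __main__ blocks; the dispatch on pkl_prefix is the only thing the caller has to choose.

import tools.data_converter.indoor_converter as indoor

# 'scannet' -> ScanNetData, 'sunrgbd' -> SUNRGBDData
indoor.create_indoor_info_file('./data/scannet', pkl_prefix='scannet')
indoor.create_indoor_info_file(
    './data/sunrgbd/sunrgbd_trainval',
    pkl_prefix='sunrgbd',
    save_path='./data/sunrgbd')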
tools/data_converter/scannet_converter.py  (deleted, -31; last at c2c0f3d8)

import os
import pickle
from pathlib import Path

from tools.data_converter.scannet_data_utils import ScanNetData


def create_scannet_info_file(data_path, pkl_prefix='scannet', save_path=None):
    assert os.path.exists(data_path)
    if save_path is None:
        save_path = Path(data_path)
    else:
        save_path = Path(save_path)
    assert os.path.exists(save_path)
    train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    train_dataset = ScanNetData(root_path=data_path, split='train')
    val_dataset = ScanNetData(root_path=data_path, split='val')
    scannet_infos_train = train_dataset.get_scannet_infos(has_label=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(scannet_infos_train, f)
    print('Scannet info train file is saved to %s' % train_filename)
    scannet_infos_val = val_dataset.get_scannet_infos(has_label=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(scannet_infos_val, f)
    print('Scannet info val file is saved to %s' % val_filename)


if __name__ == '__main__':
    create_scannet_info_file(
        data_path='./data/scannet', save_path='./data/scannet')
tools/data_converter/scannet_data_utils.py  (+30 -12)

+import concurrent.futures as futures
 import os

+import mmcv
 import numpy as np


 class ScanNetData(object):
-    ''' Load and parse object data '''
+    """ScanNet Data.
+
+    Generate scannet infos for scannet_converter.
+
+    Args:
+        root_path (str): Root path of the raw data.
+        split (str): Set split type of the data. Default: 'train'.
+    """

     def __init__(self, root_path, split='train'):
         self.root_dir = root_path

@@ -25,28 +34,37 @@ class ScanNetData(object):
             for i, nyu40id in enumerate(list(self.cat_ids))
         }
         assert split in ['train', 'val', 'test']
-        split_dir = os.path.join(self.root_dir, 'meta_data',
-                                 'scannetv2_%s.txt' % split)
-        self.sample_id_list = [
-            x.strip() for x in open(split_dir).readlines()
-        ] if os.path.exists(split_dir) else None
+        split_file = os.path.join(self.root_dir, 'meta_data',
+                                  f'scannetv2_{split}.txt')
+        mmcv.check_file_exist(split_file)
+        self.sample_id_list = mmcv.list_from_file(split_file)

     def __len__(self):
         return len(self.sample_id_list)

     def get_box_label(self, idx):
         box_file = os.path.join(self.root_dir, 'scannet_train_instance_data',
-                                '%s_bbox.npy' % idx)
+                                f'{idx}_bbox.npy')
         assert os.path.exists(box_file)
         return np.load(box_file)

-    def get_scannet_infos(self, num_workers=4, has_label=True,
-                          sample_id_list=None):
-        import concurrent.futures as futures
+    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
+        """Get data infos.
+
+        This method gets information from the raw data.
+
+        Args:
+            num_workers (int): Number of threads to be used. Default: 4.
+            has_label (bool): Whether the data has label. Default: True.
+            sample_id_list (List[int]): Index list of the sample.
+                Default: None.
+
+        Returns:
+            infos (List[dict]): Information of the raw data.
+        """

         def process_single_scene(sample_idx):
-            print('%s sample_idx: %s' % (self.split, sample_idx))
+            print(f'{self.split} sample_idx: {sample_idx}')
             info = dict()
             pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
             info['point_cloud'] = pc_info
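One behavioural note on the split-file change above, as a hedged sketch (the path is illustrative): the old open()/readlines() branch silently left sample_id_list as None when the split file was missing, whereas mmcv.check_file_exist fails fast.

import mmcv

split_file = './data/scannet/meta_data/scannetv2_train.txt'  # illustrative
mmcv.check_file_exist(split_file)  # raises FileNotFoundError if missing
sample_id_list = mmcv.list_from_file(split_file)  # one stripped id per line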
tools/data_converter/sunrgbd_converter.py  (deleted, -36; last at c2c0f3d8)

import os
import pickle
from pathlib import Path

from tools.data_converter.sunrgbd_data_utils import SUNRGBDData


def create_sunrgbd_info_file(data_path,
                             pkl_prefix='sunrgbd',
                             save_path=None,
                             use_v1=False):
    assert os.path.exists(data_path)
    if save_path is None:
        save_path = Path(data_path)
    else:
        save_path = Path(save_path)
    assert os.path.exists(save_path)
    train_filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    val_filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    train_dataset = SUNRGBDData(
        root_path=data_path, split='train', use_v1=use_v1)
    val_dataset = SUNRGBDData(root_path=data_path, split='val', use_v1=use_v1)
    sunrgbd_infos_train = train_dataset.get_sunrgbd_infos(has_label=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(sunrgbd_infos_train, f)
    print('Sunrgbd info train file is saved to %s' % train_filename)
    sunrgbd_infos_val = val_dataset.get_sunrgbd_infos(has_label=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(sunrgbd_infos_val, f)
    print('Sunrgbd info val file is saved to %s' % val_filename)


if __name__ == '__main__':
    create_sunrgbd_info_file(
        data_path='./data/sunrgbd/sunrgbd_trainval',
        save_path='./data/sunrgbd')
tools/data_converter/sunrgbd_data_utils.py  (+57 -29)

+import concurrent.futures as futures
 import os

-import cv2
+import mmcv
 import numpy as np
 import scipy.io as sio


-def random_sampling(pc, num_sample, replace=None, return_choices=False):
-    """ Input is NxC, output is num_samplexC """
+def random_sampling(points, num_points, replace=None, return_choices=False):
+    """Random sampling.
+
+    Sampling point cloud to a certain number of points.
+
+    Args:
+        points (ndarray): Point cloud.
+        num_points (int): The number of samples.
+        replace (bool): Whether the sample is with or without replacement.
+        return_choices (bool): Whether to return choices.
+
+    Returns:
+        points (ndarray): Point cloud after sampling.
+    """
     if replace is None:
-        replace = (pc.shape[0] < num_sample)
-    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
+        replace = (points.shape[0] < num_points)
+    choices = np.random.choice(points.shape[0], num_points, replace=replace)
     if return_choices:
-        return pc[choices], choices
+        return points[choices], choices
     else:
-        return pc[choices]
+        return points[choices]


 class SUNRGBDInstance(object):

@@ -44,7 +57,15 @@ class SUNRGBDInstance(object):
 class SUNRGBDData(object):
-    ''' Load and parse object data '''
+    """SUNRGBD Data.
+
+    Generate scannet infos for sunrgbd_converter.
+
+    Args:
+        root_path (str): Root path of the raw data.
+        split (str): Set split type of the data. Default: 'train'.
+        use_v1 (bool): Whether to use v1. Default: False.
+    """

     def __init__(self, root_path, split='train', use_v1=False):
         self.root_dir = root_path

@@ -60,11 +81,9 @@ class SUNRGBDData(object):
             for label in range(len(self.classes))
         }
         assert split in ['train', 'val', 'test']
-        split_dir = os.path.join(self.root_dir, '%s_data_idx.txt' % split)
-        self.sample_id_list = [
-            int(x.strip()) for x in open(split_dir).readlines()
-        ] if os.path.exists(split_dir) else None
+        split_file = os.path.join(self.root_dir, f'{split}_data_idx.txt')
+        mmcv.check_file_exist(split_file)
+        self.sample_id_list = map(int, mmcv.list_from_file(split_file))
         self.image_dir = os.path.join(self.split_dir, 'image')
         self.calib_dir = os.path.join(self.split_dir, 'calib')
         self.depth_dir = os.path.join(self.split_dir, 'depth')

@@ -77,20 +96,20 @@ class SUNRGBDData(object):
         return len(self.sample_id_list)

     def get_image(self, idx):
-        img_filename = os.path.join(self.image_dir, '%06d.jpg' % (idx))
-        return cv2.imread(img_filename)
+        img_filename = os.path.join(self.image_dir, f'{idx:06d}.jpg')
+        return mmcv.imread(img_filename)

     def get_image_shape(self, idx):
         image = self.get_image(idx)
         return np.array(image.shape[:2], dtype=np.int32)

     def get_depth(self, idx):
-        depth_filename = os.path.join(self.depth_dir, '%06d.mat' % (idx))
+        depth_filename = os.path.join(self.depth_dir, f'{idx:06d}.mat')
         depth = sio.loadmat(depth_filename)['instance']
         return depth

     def get_calibration(self, idx):
-        calib_filepath = os.path.join(self.calib_dir, '%06d.txt' % (idx))
+        calib_filepath = os.path.join(self.calib_dir, f'{idx:06d}.txt')
         lines = [line.rstrip() for line in open(calib_filepath)]
         Rt = np.array([float(x) for x in lines[0].split(' ')])
         Rt = np.reshape(Rt, (3, 3), order='F')

@@ -98,33 +117,43 @@ class SUNRGBDData(object):
         return K, Rt

     def get_label_objects(self, idx):
-        label_filename = os.path.join(self.label_dir, '%06d.txt' % (idx))
+        label_filename = os.path.join(self.label_dir, f'{idx:06d}.txt')
         lines = [line.rstrip() for line in open(label_filename)]
         objects = [SUNRGBDInstance(line) for line in lines]
         return objects

-    def get_sunrgbd_infos(self, num_workers=4, has_label=True,
-                          sample_id_list=None):
-        import concurrent.futures as futures
+    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
+        """Get data infos.
+
+        This method gets information from the raw data.
+
+        Args:
+            num_workers (int): Number of threads to be used. Default: 4.
+            has_label (bool): Whether the data has label. Default: True.
+            sample_id_list (List[int]): Index list of the sample.
+                Default: None.
+
+        Returns:
+            infos (List[dict]): Information of the raw data.
+        """

         def process_single_scene(sample_idx):
-            print('%s sample_idx: %s' % (self.split, sample_idx))
+            print(f'{self.split} sample_idx: {sample_idx}')
             # convert depth to points
             SAMPLE_NUM = 50000
+            # TODO: Check whether can move the point
+            # sampling process during training.
             pc_upright_depth = self.get_depth(sample_idx)
-            # TODO : sample points in loading process and test
             pc_upright_depth_subsampled = random_sampling(
                 pc_upright_depth, SAMPLE_NUM)
             np.savez_compressed(
-                os.path.join(self.root_dir, 'lidar', '%06d.npz' % sample_idx),
+                os.path.join(self.root_dir, 'lidar', f'{sample_idx:06d}.npz'),
                 pc=pc_upright_depth_subsampled)

             info = dict()
             pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
             info['point_cloud'] = pc_info
-            img_name = os.path.join(self.image_dir, '%06d.jpg' % (sample_idx))
+            img_name = os.path.join(self.image_dir, f'{sample_idx:06d}.jpg')
             img_path = os.path.join(self.image_dir, img_name)
             image_info = {
                 'image_idx': sample_idx,

@@ -183,8 +212,7 @@ class SUNRGBDData(object):
             return info

         lidar_save_dir = os.path.join(self.root_dir, 'lidar')
-        if not os.path.exists(lidar_save_dir):
-            os.mkdir(lidar_save_dir)
+        mmcv.mkdir_or_exist(lidar_save_dir)
         sample_id_list = sample_id_list if \
             sample_id_list is not None else self.sample_id_list
         with futures.ThreadPoolExecutor(num_workers) as executor:
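The rewritten random_sampling is self-contained enough to sanity-check in isolation; a toy example (the array shapes here are invented):

import numpy as np

from tools.data_converter.sunrgbd_data_utils import random_sampling

points = np.random.rand(100, 6)  # NxC toy point cloud
assert random_sampling(points, 50).shape == (50, 6)
# with fewer input points than requested, replace defaults to True,
# so over-sampling still succeeds
sub, choices = random_sampling(points, 200, return_choices=True)
assert sub.shape == (200, 6) and choices.shape == (200,)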
tools/slurm_train.sh  (+1 -2)

 #!/usr/bin/env bash

 set -x
-export PYTHONPATH=`pwd`:$PYTHONPATH

 PARTITION=$1
 JOB_NAME=$2

@@ -20,4 +19,4 @@ srun -p ${PARTITION} \
     --ntasks-per-node=${GPUS_PER_NODE} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
-    python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
+    python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
tools/train.py  (+21 -6)

 from __future__ import division
 import argparse
 import copy
 import logging
 import os
 import os.path as osp
 import time

@@ -11,10 +12,11 @@ from mmcv import Config
 from mmcv.runner import init_dist

 from mmdet3d import __version__
+from mmdet3d.apis import train_detector
 from mmdet3d.datasets import build_dataset
 from mmdet3d.models import build_detector
-from mmdet3d.utils import collect_env
-from mmdet.apis import get_root_logger, set_random_seed, train_detector
+from mmdet3d.utils import collect_env, get_root_logger
+from mmdet.apis import set_random_seed


 def parse_args():

@@ -27,12 +29,18 @@ def parse_args():
         '--validate',
         action='store_true',
         help='whether to evaluate the checkpoint during training')
-    parser.add_argument(
+    group_gpus = parser.add_mutually_exclusive_group()
+    group_gpus.add_argument(
         '--gpus',
         type=int,
         default=1,
         help='number of gpus to use '
         '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-ids',
+        type=int,
+        nargs='+',
+        help='ids of gpus to use '
+        '(only applicable to non-distributed training)')
     parser.add_argument('--seed', type=int, default=0, help='random seed')
     parser.add_argument(
         '--deterministic',

@@ -73,11 +81,14 @@ def main():
         osp.splitext(osp.basename(args.config))[0])
     if args.resume_from is not None:
         cfg.resume_from = args.resume_from
-    cfg.gpus = args.gpus
+    if args.gpu_ids is not None:
+        cfg.gpu_ids = args.gpu_ids
+    else:
+        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

     if args.autoscale_lr:
         # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
-        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

     # init distributed env first, since logger depends on the dist info.
     if args.launcher == 'none':

@@ -93,6 +104,10 @@ def main():
     log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
     logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

+    # add a logging filter
+    logging_filter = logging.Filter('mmdet')
+    logging_filter.filter = lambda record: record.find('mmdet') != -1
+
     # init the meta dict to record some important information such as
     # environment info and seed, which will be logged
     meta = dict()
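The GPU-argument rework in parse_args()/main() condenses to a standalone argparse sketch (plain argparse, no mmcv config object): --gpus and --gpu-ids are mutually exclusive, and downstream code only ever sees a gpu_ids sequence, which is also what the linear LR scaling rule now counts.

import argparse

parser = argparse.ArgumentParser()
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, default=1)
group_gpus.add_argument('--gpu-ids', type=int, nargs='+')

# passing both flags would make argparse exit with an error
args = parser.parse_args(['--gpu-ids', '0', '2'])
gpu_ids = args.gpu_ids if args.gpu_ids is not None else range(args.gpus)
# the linear scaling rule counts devices via len(gpu_ids)
print(list(gpu_ids), len(list(gpu_ids)) / 8)  # [0, 2] 0.25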