OpenDAS / mmdetection3d · Commits · cbc2491f

Unverified commit cbc2491f, authored Oct 13, 2021 by Tai-Wang, committed by GitHub on Oct 13, 2021

Add code-spell pre-commit hook and fix typos (#995)
Parent: 6b1602f1
Changes: 79 files in total. Showing 19 changed files with 35 additions and 32 deletions.

  mmdet3d/ops/ball_query/ball_query.py (+1 -1)
  mmdet3d/ops/group_points/group_points.py (+1 -1)
  mmdet3d/ops/knn/knn.py (+2 -2)
  mmdet3d/ops/norm.py (+3 -3)
  mmdet3d/ops/paconv/paconv.py (+2 -2)
  mmdet3d/ops/paconv/utils.py (+2 -2)
  mmdet3d/ops/pointnet_modules/paconv_sa_module.py (+1 -1)
  mmdet3d/ops/spconv/conv.py (+5 -5)
  mmdet3d/ops/spconv/include/prettyprint.h (+1 -1)
  mmdet3d/ops/spconv/include/tensorview/tensorview.h (+2 -2)
  mmdet3d/ops/voxel/src/voxelization_cuda.cu (+2 -2)
  requirements/runtime.txt (+1 -1)
  setup.cfg (+3 -0)
  tests/data/kitti/kitti_infos_mono3d.coco.json (+1 -1)
  tests/data/nuscenes/nus_infos_mono3d.coco.json (+1 -1)
  tests/test_metrics/test_losses.py (+2 -2)
  tests/test_utils/test_box3d.py (+1 -1)
  tools/data_converter/kitti_converter.py (+2 -2)
  tools/data_converter/nuscenes_converter.py (+2 -2)

mmdet3d/ops/ball_query/ball_query.py

@@ -23,7 +23,7 @@ class BallQuery(Function):
             center_xyz (Tensor): (B, npoint, 3) centers of the ball query.
 
         Returns:
-            Tensor: (B, npoint, nsample) tensor with the indicies of
+            Tensor: (B, npoint, nsample) tensor with the indices of
                 the features that form the query balls.
         """
         assert center_xyz.is_contiguous()

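The docstring above describes the op's output: for each query center, the indices of the points that fall inside the ball. A rough pure-PyTorch sketch of that behaviour (illustrative names only, not the repository's CUDA op, and it does not repeat the first hit when fewer than `nsample` points are in range):

```python
import torch

def ball_query_naive(xyz, center_xyz, radius, nsample):
    """xyz: (B, N, 3) points, center_xyz: (B, npoint, 3) -> (B, npoint, nsample) indices."""
    dist = torch.cdist(center_xyz, xyz)            # (B, npoint, N) pairwise distances
    within = (dist < radius).float()
    # Put in-radius points first, then keep the first nsample indices per center.
    order = torch.argsort(within, dim=-1, descending=True)
    return order[..., :nsample]
```
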
mmdet3d/ops/group_points/group_points.py

@@ -183,7 +183,7 @@ class GroupingOperation(Function):
         Args:
             features (Tensor): (B, C, N) tensor of features to group.
-            indices (Tensor): (B, npoint, nsample) the indicies of
+            indices (Tensor): (B, npoint, nsample) the indices of
                 features to group with.
 
         Returns:

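GroupingOperation gathers the listed indices out of a (B, C, N) feature map. A minimal sketch of the equivalent gather in plain PyTorch (illustrative, not the CUDA implementation):

```python
import torch

def group_features_naive(features, indices):
    """features: (B, C, N), indices: (B, npoint, nsample) -> (B, C, npoint, nsample)."""
    B, C, N = features.shape
    _, npoint, nsample = indices.shape
    idx = indices.long().reshape(B, 1, npoint * nsample).expand(B, C, -1)
    grouped = torch.gather(features, 2, idx)      # pick the listed indices per channel
    return grouped.reshape(B, C, npoint, nsample)
```
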
mmdet3d/ops/knn/knn.py

@@ -27,11 +27,11 @@ class KNN(Function):
             center_xyz (Tensor): (B, npoint, 3) if transposed == False,
                 else (B, 3, npoint). centers of the knn query.
             transposed (bool): whether the input tensors are transposed.
-                defaults to False. Should not expicitly use this keyword
+                defaults to False. Should not explicitly use this keyword
                 when calling knn (=KNN.apply), just add the fourth param.
 
         Returns:
-            Tensor: (B, k, npoint) tensor with the indicies of
+            Tensor: (B, k, npoint) tensor with the indices of
                 the features that form k-nearest neighbours.
         """
         assert k > 0

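A minimal sketch of the same k-nearest-neighbour lookup in plain PyTorch, for the untransposed layout described above (illustrative only; the real op is a CUDA kernel):

```python
import torch

def knn_naive(k, xyz, center_xyz):
    """xyz: (B, N, 3), center_xyz: (B, npoint, 3) -> (B, k, npoint) neighbour indices."""
    dist = torch.cdist(center_xyz, xyz)                 # (B, npoint, N)
    idx = dist.topk(k, dim=-1, largest=False).indices   # k smallest distances per center
    return idx.transpose(1, 2).contiguous()             # match the (B, k, npoint) layout
```
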
mmdet3d/ops/norm.py

@@ -26,7 +26,7 @@ class AllReduce(Function):
 @NORM_LAYERS.register_module('naiveSyncBN1d')
 class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
-    """Syncronized Batch Normalization for 3D Tensors.
+    """Synchronized Batch Normalization for 3D Tensors.
 
     Note:
         This implementation is modified from
@@ -37,7 +37,7 @@ class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
         when the batch size on each worker is quite different
         (e.g., when scale augmentation is used).
         In 3D detection, different workers has points of different shapes,
-        whish also cause instability.
+        which also cause instability.
 
         Use this implementation before `nn.SyncBatchNorm` is fixed.
         It is slower than `nn.SyncBatchNorm`.
@@ -80,7 +80,7 @@ class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
 @NORM_LAYERS.register_module('naiveSyncBN2d')
 class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
-    """Syncronized Batch Normalization for 4D Tensors.
+    """Synchronized Batch Normalization for 4D Tensors.
 
     Note:
         This implementation is modified from

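The note above is the motivation for these layers: per-worker batch statistics are unreliable when each worker holds a different number of points, so the statistics are averaged across workers before normalizing. A rough sketch of that idea, assuming a standard all-reduce (illustrative, not the exact code in norm.py):

```python
import torch
import torch.distributed as dist

def synced_batch_stats(x):
    """x: (B, C, L) activations on this worker -> per-channel mean and var
    averaged over all workers, so every worker normalizes identically."""
    mean = x.mean(dim=[0, 2])
    meansqr = (x * x).mean(dim=[0, 2])
    if dist.is_available() and dist.is_initialized():
        vec = torch.cat([mean, meansqr])
        dist.all_reduce(vec)                # sum the statistics across workers
        vec = vec / dist.get_world_size()   # ... then average them
        mean, meansqr = torch.split(vec, mean.numel())
    var = meansqr - mean * mean
    return mean, var
```
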
mmdet3d/ops/paconv/paconv.py

@@ -83,7 +83,7 @@ class ScoreNet(nn.Module):
         Args:
             xyz_features (torch.Tensor): (B, C, N, K), features constructed
                 from xyz coordinates of point pairs. May contain relative
-                positions, Euclidian distance, etc.
+                positions, Euclidean distance, etc.
 
         Returns:
             torch.Tensor: (B, N, K, M), predicted scores for `M` kernels.
@@ -174,7 +174,7 @@ class PAConv(nn.Module):
             # (grouped_xyz - center_xyz, grouped_xyz)
             self.scorenet_in_channels = 6
         elif scorenet_input == 'w_neighbor_dist':
-            # (center_xyz, grouped_xyz - center_xyz, Euclidian distance)
+            # (center_xyz, grouped_xyz - center_xyz, Euclidean distance)
             self.scorenet_in_channels = 7
         else:
             raise NotImplementedError(

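The 'w_neighbor_dist' comment above corresponds to a 7-channel ScoreNet input: center coordinates, neighbour offsets, and the Euclidean distance between them. A rough sketch of that construction (illustrative shapes and names, not the repository's exact code):

```python
import torch

def build_scorenet_input(center_xyz, grouped_xyz):
    """center_xyz: (B, 3, npoint, 1), grouped_xyz: (B, 3, npoint, K) -> (B, 7, npoint, K)."""
    offset = grouped_xyz - center_xyz                 # relative positions
    dist = torch.norm(offset, dim=1, keepdim=True)    # Euclidean distance, one channel
    center = center_xyz.expand_as(grouped_xyz)        # broadcast center to K neighbours
    return torch.cat([center, offset, dist], dim=1)   # 3 + 3 + 1 = 7 channels
```
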
mmdet3d/ops/paconv/utils.py

@@ -2,14 +2,14 @@ import torch
 def calc_euclidian_dist(xyz1, xyz2):
-    """Calculate the Euclidian distance between two sets of points.
+    """Calculate the Euclidean distance between two sets of points.
 
     Args:
         xyz1 (torch.Tensor): (N, 3), the first set of points.
         xyz2 (torch.Tensor): (N, 3), the second set of points.
 
     Returns:
-        torch.Tensor: (N, ), the Euclidian distance between each point pair.
+        torch.Tensor: (N, ), the Euclidean distance between each point pair.
     """
     assert xyz1.shape[0] == xyz2.shape[0], 'number of points are not the same'
     assert xyz1.shape[1] == xyz2.shape[1] == 3, \

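For reference, a one-line PyTorch equivalent of the per-pair distance described in the docstring (a sketch; the implementation in utils.py may differ):

```python
import torch

def euclidean_dist(xyz1, xyz2):
    """xyz1, xyz2: (N, 3) point sets -> (N,) distance between corresponding points."""
    assert xyz1.shape == xyz2.shape and xyz1.shape[1] == 3
    return torch.norm(xyz1 - xyz2, dim=1)
```
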
mmdet3d/ops/pointnet_modules/paconv_sa_module.py

@@ -28,7 +28,7 @@ class PAConvSAModuleMSG(BasePointSAModule):
             - 'w_neighbor': Use xyz coordinates and the difference with center
               points as input.
             - 'w_neighbor_dist': Use xyz coordinates, the difference with
-              center points and the Euclidian distance as input.
+              center points and the Euclidean distance as input.
         scorenet_cfg (dict, optional): Config of the ScoreNet module, which
             may contain the following keys and values:

mmdet3d/ops/spconv/conv.py

@@ -143,16 +143,16 @@ class SparseConvolution(SparseModule):
             out_tensor.indice_dict = input.indice_dict
             out_tensor.grid = input.grid
             return out_tensor
-        datas = input.find_indice_pair(self.indice_key)
+        data = input.find_indice_pair(self.indice_key)
         if self.inverse:
-            assert datas is not None and self.indice_key is not None
-            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas
+            assert data is not None and self.indice_key is not None
+            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = data
             assert indice_pairs.shape[0] == np.prod(self.kernel_size), \
                 'inverse conv must have same kernel size as its couple conv'
         else:
-            if self.indice_key is not None and datas is not None:
-                outids, _, indice_pairs, indice_pair_num, _ = datas
+            if self.indice_key is not None and data is not None:
+                outids, _, indice_pairs, indice_pair_num, _ = data
             else:
                 outids, indice_pairs, indice_pair_num = ops.get_indice_pairs(
                     indices,

mmdet3d/ops/spconv/include/prettyprint.h

@@ -93,7 +93,7 @@ struct delimiters {
 };
 
 // Functor to print containers. You can use this directly if you want
-// to specificy a non-default delimiters type. The printing logic can
+// to specify a non-default delimiters type. The printing logic can
 // be customized by specializing the nested template.
 template <typename T, typename TChar = char,

mmdet3d/ops/spconv/include/tensorview/tensorview.h

@@ -73,7 +73,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) { \
     std::stringstream __macro_s; \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. "; \
+    __macro_s << #expr << " assert failed. "; \
     tv::sstream_print(__macro_s, __VA_ARGS__); \
     throw std::runtime_error(__macro_s.str()); \
   } \
@@ -84,7 +84,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) { \
     std::stringstream __macro_s; \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. "; \
+    __macro_s << #expr << " assert failed. "; \
     tv::sstream_print(__macro_s, __VA_ARGS__); \
     throw std::invalid_argument(__macro_s.str()); \
   } \

mmdet3d/ops/voxel/src/voxelization_cuda.cu

@@ -305,7 +305,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
   cudaDeviceSynchronize();
   AT_CUDA_CHECK(cudaGetLastError());
 
-  // 3. determin voxel num and voxel's coor index
+  // 3. determine voxel num and voxel's coor index
   // make the logic in the CUDA device could accelerate about 10 times
   auto coor_to_voxelidx = -at::ones({
@@ -316,7 +316,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
       {1, },
-      points.options().dtype(at::kInt));  // must be zero from the begining
+      points.options().dtype(at::kInt));  // must be zero from the beginning
   AT_DISPATCH_ALL_TYPES(
       temp_coors.scalar_type(), "determin_duplicate", ([&] {

requirements/runtime.txt

 lyft_dataset_sdk
 networkx>=2.2,<2.3
-# we may unlock the verion of numba in the future
+# we may unlock the version of numba in the future
 numba==0.48.0
 numpy<1.20.0
 nuscenes-devkit

setup.cfg

@@ -11,3 +11,6 @@ known_first_party = mmdet,mmseg,mmdet3d
 known_third_party = cv2,imageio,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,pytorch_sphinx_theme,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
+
+[codespell]
+ignore-words-list = ans,refridgerator,crate,hist,formating,dout,wan,nd,fo

tests/data/kitti/kitti_infos_mono3d.coco.json (diff not shown)

tests/data/nuscenes/nus_infos_mono3d.coco.json (diff not shown)

tests/test_metrics/test_losses.py

@@ -95,7 +95,7 @@ def test_paconv_regularization_loss():
     set_random_seed(0, True)
     model = ToyModel()
 
-    # reduction shoule be in ['none', 'mean', 'sum']
+    # reduction should be in ['none', 'mean', 'sum']
     with pytest.raises(AssertionError):
         paconv_corr_loss = PAConvRegularizationLoss(reduction='l2')
@@ -116,7 +116,7 @@ def test_paconv_regularization_loss():
 def test_uncertain_smooth_l1_loss():
     from mmdet3d.models.losses import UncertainL1Loss, UncertainSmoothL1Loss
 
-    # reduction shoule be in ['none', 'mean', 'sum']
+    # reduction should be in ['none', 'mean', 'sum']
     with pytest.raises(AssertionError):
         uncertain_l1_loss = UncertainL1Loss(reduction='l2')
     with pytest.raises(AssertionError):

tests/test_utils/test_box3d.py

@@ -656,7 +656,7 @@ def test_boxes_conversion():
         dtype=torch.float32)
     rt_mat = rect @ Trv2c
 
-    # test coversion with Box type
+    # test conversion with Box type
     cam_to_lidar_box = Box3DMode.convert(camera_boxes, Box3DMode.CAM,
                                          Box3DMode.LIDAR, rt_mat.inverse())
     assert torch.allclose(cam_to_lidar_box.tensor, expected_tensor)

tools/data_converter/kitti_converter.py

@@ -493,7 +493,7 @@ def get_2d_boxes(info, occluded, mono3d=True):
 def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
-    """Generate one 2D annotation record given various informations on top of
+    """Generate one 2D annotation record given various information on top of
     the 2D bounding box coordinates.
 
     Args:
@@ -508,7 +508,7 @@ def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
     Returns:
         dict: A sample 2D annotation record.
-            - file_name (str): flie name
+            - file_name (str): file name
             - image_id (str): sample data token
             - area (float): 2d box area
             - category_name (str): category name

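The docstring lists the fields of the returned record. A rough sketch of such a record (field names taken from the docstring; the bbox layout is an assumption):

```python
from collections import OrderedDict

def make_2d_record(x1, y1, x2, y2, sample_data_token, filename, category_name):
    """Illustrative 2D annotation record with the fields described above."""
    rec = OrderedDict()
    rec['file_name'] = filename               # file name
    rec['image_id'] = sample_data_token       # sample data token
    rec['area'] = (x2 - x1) * (y2 - y1)       # 2d box area
    rec['category_name'] = category_name      # category name
    rec['bbox'] = [x1, y1, x2 - x1, y2 - y1]  # assumed COCO-style xywh box
    return rec
```
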
tools/data_converter/nuscenes_converter.py

@@ -565,7 +565,7 @@ def post_process_coords(
 def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float,
                     sample_data_token: str, filename: str) -> OrderedDict:
-    """Generate one 2D annotation record given various informations on top of
+    """Generate one 2D annotation record given various information on top of
     the 2D bounding box coordinates.
 
     Args:
@@ -580,7 +580,7 @@ def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float,
     Returns:
         dict: A sample 2D annotation record.
-            - file_name (str): flie name
+            - file_name (str): file name
             - image_id (str): sample data token
             - area (float): 2d box area
             - category_name (str): category name
